diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..10a9953d --- /dev/null +++ b/.flake8 @@ -0,0 +1,38 @@ +[flake8] +exclude = + .git, + .venv, + __pycache__, + *_template.py, + */migrations/*, + */node_modules/*, + k8s/kubesphere, + k8s/cluster/global, + .generated, + docker, + local, + config, + src/core/workspace, + src/core/old-workspace, + src/airbyte, + src/code-server, + src/dagster, + src/dbt-rpc, + src/superset, + src/observe, + src/pomerium +max-complexity = 10 +max-line-length = 120 +extend-ignore = E203 +ignore = + E266 + E741 + E713 + E731 + W503 + C901 +per-file-ignores = + cli.py:E501 + scripts/docker_images.py:E501 + scripts/k8s_deploy.py:E501 + *_generator.py:F541 diff --git a/.gitguardian.yml b/.gitguardian.yml new file mode 100644 index 00000000..4f7d59de --- /dev/null +++ b/.gitguardian.yml @@ -0,0 +1,5 @@ +version: 2 + +secret: + ignored-paths: + - '.gitsecret/**' diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..7e4a2982 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,42 @@ + + +## Testing steps: + + + +## Checklist: + + + +- [ ] [Mandatory] My pull request represents one logical piece of work and my commits are related and look clean. + +- [ ] [Mandatory] I ran the integration tests (`./cli.py integration_tests`) + +- [ ] I have bumped minor or major version accordingly on `.version.yml` (see [Docs](https://github.com/datacoves/datacoves/blob/main/docs/how-tos/datacoves-versioning.md)) + +- [ ] I created new 1Password items (`./cli.py sync_secrets`) + +- [ ] I have built new docker images on this branch (`./cli.py build_and_push`) so `./cli.py set_release` needs to be run to test it + +- [ ] This requires configuration changes on the cluster (please specify configuration changes below) + +- [ ] IF there are configuration changes or other changes to the release, please SET the 'special release step' label on this issue! 
+ diff --git a/.github/workflows/build_deploy_airflow-airflow.yml b/.github/workflows/build_deploy_airflow-airflow.yml new file mode 100644 index 00000000..a9463eac --- /dev/null +++ b/.github/workflows/build_deploy_airflow-airflow.yml @@ -0,0 +1,59 @@ +name: Build and deploy airflow-airflow + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/airflow/airflow/* + - src/airflow/airflow/**/* + - src/common/* + - src/common/**/* + +jobs: + collect-airflow-profiles: # will output all airflow profiles under the profiles dir + runs-on: ubuntu-latest + outputs: + dir: ${{ steps.collect.outputs.dir }} + steps: + - uses: actions/checkout@v2 + - id: collect + run: | + cd src/airflow/airflow + echo "::set-output name=dir::$(ls -1 profiles/ | jq -R -s -c 'split("\n")[:-1]')" + + build_and_deploy_airflow-airflow: + name: Build and deploy airflow-airflow + runs-on: ubuntu-latest + container: datacoves/ci-multiarch:latest + needs: [collect-airflow-profiles] + strategy: + matrix: + dir: ${{ fromJson(needs.collect-airflow-profiles.outputs.dir) }} + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Lint + run: flake8 + shell: bash + + - name: Build and deploy airflow-airflow base + run: ./cli.py ci_build_and_push airflow/airflow datacovesprivate ${{ matrix.dir }} + shell: bash diff --git a/.github/workflows/build_deploy_ci-airflow.yml b/.github/workflows/build_deploy_ci-airflow.yml new file mode 100644 index 00000000..97caf5d5 --- /dev/null +++ b/.github/workflows/build_deploy_ci-airflow.yml @@ -0,0 +1,60 @@ +name: Build and deploy ci-airflow + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/ci/airflow/* + - src/ci/airflow/**/* + - src/common/* + - src/common/**/* + +jobs: + collect-airflow-profiles: # will output all airflow profiles under the profiles dir + runs-on: ubuntu-latest + outputs: + dir: ${{ steps.collect.outputs.dir }} + steps: + - uses: actions/checkout@v2 + - id: collect + run: | + cd src/ci/airflow + echo "::set-output name=dir::$(ls -1 profiles/ | jq -R -s -c 'split("\n")[:-1]')" + + build_and_deploy_ci-airflow: + name: Build and deploy ci-airflow + runs-on: ubuntu-latest + container: datacoves/ci-multiarch:latest + needs: [collect-airflow-profiles] + strategy: + matrix: + dir: ${{ fromJson(needs.collect-airflow-profiles.outputs.dir) }} + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + DATACOVES_GITHUB_API_TOKEN: ${{ secrets.DATACOVES_GITHUB_API_TOKEN }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Lint + run: flake8 + shell: bash + + - name: Build and deploy ci-airflow + run: | + ./cli.py ci_build_and_push ci/airflow datacoves ${{ matrix.dir }} diff --git a/.github/workflows/build_deploy_ci-basic.yml b/.github/workflows/build_deploy_ci-basic.yml new file mode 100644 index 00000000..6c8c16f3 --- /dev/null +++ b/.github/workflows/build_deploy_ci-basic.yml @@ 
-0,0 +1,59 @@ +name: Build and deploy ci-basic + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/ci/basic/* + - src/ci/basic/**/* + - src/common/* + - src/common/**/* + +jobs: + build_and_deploy_ci-basic: + name: Build and deploy ci-basic + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + DATACOVES_GITHUB_API_TOKEN: ${{ secrets.DATACOVES_GITHUB_API_TOKEN }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Build and deploy ci-basic base + run: ./cli.py ci_build_and_push ci/basic datacoves base + shell: bash + + - name: Build and deploy ci-basic dbt-snowflake + run: ./cli.py ci_build_and_push ci/basic datacoves dbt-snowflake + shell: bash + + - name: Build and deploy ci-basic dbt-redshift + run: ./cli.py ci_build_and_push ci/basic datacoves dbt-redshift + shell: bash + + - name: Build and deploy ci-basic dbt-bigquery + run: ./cli.py ci_build_and_push ci/basic datacoves dbt-bigquery + shell: bash + + - name: Build and deploy ci-basic dbt-databricks + run: ./cli.py ci_build_and_push ci/basic datacoves dbt-databricks + shell: bash diff --git a/.github/workflows/build_deploy_ci-multiarch.yml b/.github/workflows/build_deploy_ci-multiarch.yml new file mode 100644 index 00000000..bdc0ad74 --- /dev/null +++ b/.github/workflows/build_deploy_ci-multiarch.yml @@ -0,0 +1,44 @@ +name: Build and deploy ci-multiarch + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/ci/multiarch/* + - src/ci/multiarch/**/* + - requirements.txt + +jobs: + build_and_deploy_ci-multiarch: + name: Build and deploy ci-multiarch + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + - name: Install missing requirements + run: | + apk update && apk upgrade && apk add --no-cache python3-dev + pip install -r requirements.txt + - name: Build and deploy ci-multiarch + run: ./cli.py ci_build_and_push ci/multiarch datacoves + shell: bash diff --git a/.github/workflows/build_deploy_code-server-code-server.yml b/.github/workflows/build_deploy_code-server-code-server.yml new file mode 100644 index 00000000..b6ccd3a3 --- /dev/null +++ b/.github/workflows/build_deploy_code-server-code-server.yml @@ -0,0 +1,62 @@ +name: Build and deploy code-server-code-server + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/code-server/code-server/* + - src/code-server/code-server/**/* + - src/common/* + - src/common/**/* + +jobs: + build_and_deploy_code-server-code-server: + name: Build and deploy code-server-code-server + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + 
- name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Lint + run: flake8 + shell: bash + + - name: Build and deploy code-server-code-server base + run: ./cli.py ci_build_and_push code-server/code-server datacovesprivate base + shell: bash + + - name: Build and deploy code-server-code-server dbt-snowflake + run: ./cli.py ci_build_and_push code-server/code-server datacovesprivate dbt-snowflake + shell: bash + + - name: Build and deploy code-server-code-server dbt-redshift + run: ./cli.py ci_build_and_push code-server/code-server datacovesprivate dbt-redshift + shell: bash + + - name: Build and deploy code-server-code-server dbt-bigquery + run: ./cli.py ci_build_and_push code-server/code-server datacovesprivate dbt-bigquery + shell: bash + + - name: Build and deploy code-server-code-server dbt-databricks + run: ./cli.py ci_build_and_push code-server/code-server datacovesprivate dbt-databricks + shell: bash diff --git a/.github/workflows/build_deploy_code-server-dbt-core-interface.yml b/.github/workflows/build_deploy_code-server-dbt-core-interface.yml new file mode 100644 index 00000000..dfa09538 --- /dev/null +++ b/.github/workflows/build_deploy_code-server-dbt-core-interface.yml @@ -0,0 +1,58 @@ +name: Build and deploy code-server-dbt-core-interface + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/code-server/dbt-core-interface/* + - src/code-server/dbt-core-interface/**/* + - src/common/* + - src/common/**/* + +jobs: + build_and_deploy_code-server-dbt-core-interface: + name: Build and deploy code-server-dbt-core-interface + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Build and deploy code-server-dbt-core-interface base + run: ./cli.py ci_build_and_push code-server/dbt-core-interface datacovesprivate base + shell: bash + + - name: Build and deploy code-server-dbt-core-interface dbt-snowflake + run: ./cli.py ci_build_and_push code-server/dbt-core-interface datacovesprivate dbt-snowflake + shell: bash + + - name: Build and deploy code-server-dbt-core-interface dbt-redshift + run: ./cli.py ci_build_and_push code-server/dbt-core-interface datacovesprivate dbt-redshift + shell: bash + + - name: Build and deploy code-server-dbt-core-interface dbt-bigquery + run: ./cli.py ci_build_and_push code-server/dbt-core-interface datacovesprivate dbt-bigquery + shell: bash + + - name: Build and deploy code-server-dbt-core-interface dbt-databricks + run: ./cli.py ci_build_and_push code-server/dbt-core-interface datacovesprivate dbt-databricks + shell: bash diff --git a/.github/workflows/build_deploy_core-admission-controller-bootstrap.yml b/.github/workflows/build_deploy_core-admission-controller-bootstrap.yml new file mode 100644 index 00000000..3f9a8bb6 --- /dev/null +++ b/.github/workflows/build_deploy_core-admission-controller-bootstrap.yml @@ -0,0 +1,44 @@ +name: Build and deploy dynamic-admission-controller-bootstrap + +concurrency: + group: ${{ 
github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/core/admission-controller-bootstrap/* + - src/core/admission-controller-bootstrap/**/* + +jobs: + build_and_deploy_dynamic-admission-controller: + name: Build and deploy admission-controller-bootstrap + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Lint + run: flake8 + shell: bash + + - name: Build and deploy admission-controller-bootstrap + run: ./cli.py ci_build_and_push core/admission-controller-bootstrap + shell: bash diff --git a/.github/workflows/build_deploy_core-admission-controller.yml b/.github/workflows/build_deploy_core-admission-controller.yml new file mode 100644 index 00000000..619329ea --- /dev/null +++ b/.github/workflows/build_deploy_core-admission-controller.yml @@ -0,0 +1,44 @@ +name: Build and deploy dynamic-admission-controller + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/core/admission-controller/* + - src/core/admission-controller/**/* + +jobs: + build_and_deploy_dynamic-admission-controller: + name: Build and deploy admission-controller + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Lint + run: flake8 + shell: bash + + - name: Build and deploy dynamic-admission-controller + run: ./cli.py ci_build_and_push core/admission-controller + shell: bash diff --git a/.github/workflows/build_deploy_core-api.yml b/.github/workflows/build_deploy_core-api.yml new file mode 100644 index 00000000..f96cb4fc --- /dev/null +++ b/.github/workflows/build_deploy_core-api.yml @@ -0,0 +1,119 @@ +name: Build and deploy core-api + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/core/api/* + - src/core/api/**/* + - "!src/core/api/app/integration_tests/**/*" + +jobs: + check_core-api: + name: Check core-api + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Lint + run: flake8 + shell: bash + + build_and_deploy_core-api-local: + name: Build and deploy core-api-local + runs-on: ubuntu-latest + needs: check_core-api + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha 
}} + + - name: Get modified files + id: git-diff + run: | + echo "diff<<EOF" >> "$GITHUB_OUTPUT" + echo $(git diff --name-only HEAD~1 HEAD) >> "$GITHUB_OUTPUT" + echo "EOF" >> "$GITHUB_OUTPUT" + + - name: Build and deploy core-api-local + if: ${{ contains(steps.git-diff.outputs.diff, 'src/core/api/Dockerfile') || contains(steps.git-diff.outputs.diff, 'src/core/api/requirements.txt') }} + run: ./cli.py ci_build_and_push core/api datacovesprivate local + shell: bash + + test_core-api: + name: Test core-api + runs-on: ubuntu-latest + needs: build_and_deploy_core-api-local + + container: datacovesprivate/core-api-local:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Django test on core-api + run: | + cd src/core/api/app/ + ./manage.py test + + build_and_deploy_core-api: + name: Build and deploy core-api + runs-on: ubuntu-latest + needs: test_core-api + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Build and deploy core-api + run: ./cli.py ci_build_and_push core/api + shell: bash diff --git a/.github/workflows/build_deploy_core-dbt-api.yml b/.github/workflows/build_deploy_core-dbt-api.yml new file mode 100644 index 00000000..b3c8844e --- /dev/null +++ b/.github/workflows/build_deploy_core-dbt-api.yml @@ -0,0 +1,40 @@ +name: Build and deploy core-dbt-api + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/core/dbt-api/* + - src/core/dbt-api/**/* + +jobs: + build_and_deploy_core-dbt-api: + name: Build and deploy core-dbt-api + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Build and deploy core-dbt-api + run: ./cli.py ci_build_and_push core/dbt-api + shell: bash diff --git a/.github/workflows/build_deploy_core-operator.yml b/.github/workflows/build_deploy_core-operator.yml new file mode 100644 index 00000000..d335b400 --- /dev/null +++ b/.github/workflows/build_deploy_core-operator.yml @@ -0,0 +1,40 @@ +name: Build and deploy core-operator + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/core/operator/* + - src/core/operator/**/* + +jobs: + build_and_deploy_core-operator: + name: Build and deploy core-operator + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - 
name: Build and deploy core-operator + run: ./cli.py ci_build_and_push core/operator + shell: bash diff --git a/.github/workflows/build_deploy_core-static-pages.yml b/.github/workflows/build_deploy_core-static-pages.yml new file mode 100644 index 00000000..5035b67c --- /dev/null +++ b/.github/workflows/build_deploy_core-static-pages.yml @@ -0,0 +1,40 @@ +name: Build and deploy core-static-pages + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/core/static-pages/* + - src/core/static-pages/**/* + +jobs: + build_and_deploy_core-static-pages: + name: Build and deploy core-static-pages + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Build and deploy core-static-pages + run: ./cli.py ci_build_and_push core/static-pages + shell: bash diff --git a/.github/workflows/build_deploy_core-workbench.yml b/.github/workflows/build_deploy_core-workbench.yml new file mode 100644 index 00000000..6b520f88 --- /dev/null +++ b/.github/workflows/build_deploy_core-workbench.yml @@ -0,0 +1,102 @@ +name: Test, build and deploy core-workbench + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/core/workbench/* + - src/core/workbench/**/* + +jobs: + build_and_deploy_core-workbench-local: + name: Build and deploy core-workbench-local + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Get modified files + id: git-diff + run: | + echo "diff<<EOF" >> "$GITHUB_OUTPUT" + echo $(git diff --name-only HEAD~1 HEAD) >> "$GITHUB_OUTPUT" + echo "EOF" >> "$GITHUB_OUTPUT" + + - name: Build and deploy core-workbench-local + if: ${{ contains(steps.git-diff.outputs.diff, 'src/core/workbench/Dockerfile') || contains(steps.git-diff.outputs.diff, 'src/core/workbench/app/package.json') }} + run: ./cli.py ci_build_and_push core/workbench datacovesprivate local + + test_core-workbench: + needs: build_and_deploy_core-workbench-local + name: Test core-workbench-local + runs-on: ubuntu-latest + + container: datacovesprivate/core-workbench-local:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Test core-workbench + run: | + mv /usr/src/node_modules src/core/workbench/app/ + cd src/core/workbench/app/ + yarn test:coverage + + build_and_deploy_core-workbench: + needs: test_core-workbench + + name: Build and deploy core-workbench + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git 
config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + - name: Checkout and deploy core-workbench + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + set-safe-directory: true + + - name: Build and deploy core-workbench + run: ./cli.py ci_build_and_push core/workbench diff --git a/.github/workflows/build_deploy_observe-dbt-docs.yml b/.github/workflows/build_deploy_observe-dbt-docs.yml new file mode 100644 index 00000000..cdd08ec8 --- /dev/null +++ b/.github/workflows/build_deploy_observe-dbt-docs.yml @@ -0,0 +1,40 @@ +name: Build and deploy observe-dbt-docs + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/observe/dbt-docs/* + - src/observe/dbt-docs/**/* + +jobs: + build_and_deploy_observe-dbt-docs: + name: Build and deploy observe-dbt-docs + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Build and deploy observe-dbt-docs + run: ./cli.py ci_build_and_push observe/dbt-docs + shell: bash diff --git a/.github/workflows/build_deploy_observe-local-dbt-docs.yml b/.github/workflows/build_deploy_observe-local-dbt-docs.yml new file mode 100644 index 00000000..6d73d1fd --- /dev/null +++ b/.github/workflows/build_deploy_observe-local-dbt-docs.yml @@ -0,0 +1,40 @@ +name: Build and deploy observe-local-dbt-docs + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/observe/local-dbt-docs/* + - src/observe/local-dbt-docs/**/* + +jobs: + build_and_deploy_observe-local-dbt-docs: + name: Build and deploy observe-local-dbt-docs + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Build and deploy observe-local-dbt-docs + run: ./cli.py ci_build_and_push observe/local-dbt-docs + shell: bash diff --git a/.github/workflows/build_deploy_pomerium-redis.yml b/.github/workflows/build_deploy_pomerium-redis.yml new file mode 100644 index 00000000..a483f984 --- /dev/null +++ b/.github/workflows/build_deploy_pomerium-redis.yml @@ -0,0 +1,40 @@ +name: Build and deploy pomerium-redis + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/pomerium/redis/* + - src/pomerium/redis/**/* + +jobs: + build_and_deploy_pomerium-redis: + name: Build and deploy pomerium-redis + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory 
/__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Build and deploy pomerium-redis + run: ./cli.py ci_build_and_push pomerium/redis + shell: bash diff --git a/.github/workflows/build_deploy_sidecar-k8s-monitor.yml b/.github/workflows/build_deploy_sidecar-k8s-monitor.yml new file mode 100644 index 00000000..d8540156 --- /dev/null +++ b/.github/workflows/build_deploy_sidecar-k8s-monitor.yml @@ -0,0 +1,40 @@ +name: Build and deploy sidecar-k8s-monitor + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/sidecar/k8s-monitor/* + - src/sidecar/k8s-monitor/**/* + +jobs: + build_and_deploy_sidecar-k8s-monitor: + name: Build and deploy sidecar-k8s-monitor + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Build and deploy sidecar-k8s-monitor + run: ./cli.py ci_build_and_push sidecar/k8s-monitor + shell: bash diff --git a/.github/workflows/build_deploy_superset-superset.yml b/.github/workflows/build_deploy_superset-superset.yml new file mode 100644 index 00000000..bffd5b53 --- /dev/null +++ b/.github/workflows/build_deploy_superset-superset.yml @@ -0,0 +1,44 @@ +name: Build and deploy superset-superset + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + - prev + paths: + - src/superset/superset/* + - src/superset/superset/**/* + +jobs: + build_and_deploy_superset-superset: + name: Build and deploy superset-superset + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Lint + run: flake8 + shell: bash + + - name: Build and deploy superset-superset + run: ./cli.py ci_build_and_push superset/superset + shell: bash diff --git a/.github/workflows/build_devdocs.yml b/.github/workflows/build_devdocs.yml new file mode 100644 index 00000000..8e653114 --- /dev/null +++ b/.github/workflows/build_devdocs.yml @@ -0,0 +1,45 @@ +# This is a basic workflow to help you get started with Actions + +name: Build and Deploy DevDocs Site + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + push: + branches: + - main + paths: + - docs/* + - docs/**/* + +jobs: + build: + runs-on: [self-hosted, Linux] + steps: + - name: Create Repo Directory and Ownership + run: | + rm -rf /home/datacoves/actions-runner/_work/devdocs + mkdir -p /home/datacoves/actions-runner/_work/datacoves/datacoves + sudo chown -R $USER:$USER /home/datacoves/actions-runner/_work/datacoves/datacoves + - name: Checkout branch + uses: actions/checkout@v4 + with: + path: devdocs + - name: Move directory out of its default location + run: mv /home/datacoves/actions-runner/_work/datacoves/datacoves/devdocs 
/home/datacoves/actions-runner/_work/ + - name: Install Dependencies + run: cd /home/datacoves/actions-runner/_work/devdocs && pip install -r requirements.txt + # Compile our HTML + - name: Build HTML + run: | + cd /home/datacoves/actions-runner/_work/devdocs && ./cli.py compile_docs docs docs_output + cd /home/datacoves/actions-runner/_work/devdocs && mv docs_output docs + - name: Re-run docker processes + run: | + cd /home/datacoves/actions-runner/_work/devdocs/docs + docker-compose down -v --rmi all + docker-compose build + docker-compose up --remove-orphans -d diff --git a/.github/workflows/combined_release_notes.yml b/.github/workflows/combined_release_notes.yml new file mode 100644 index 00000000..99d5fe95 --- /dev/null +++ b/.github/workflows/combined_release_notes.yml @@ -0,0 +1,52 @@ +name: Combined release notes + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + inputs: + release: + description: "Release" + required: true + +jobs: + combine_release_notes: + name: Combined release notes ${{ inputs.release }} + runs-on: ubuntu-latest + steps: + - name: Install 1Password client + run: | + curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --yes --dearmor --output /usr/share/keyrings/1password-archive-keyring.gpg + echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/1password-archive-keyring.gpg] https://downloads.1password.com/linux/debian/amd64 stable main' | sudo tee /etc/apt/sources.list.d/1password.list + sudo mkdir -p /etc/debsig/policies/AC2D62742012EA22/ + curl -sS https://downloads.1password.com/linux/debian/debsig/1password.pol | sudo tee /etc/debsig/policies/AC2D62742012EA22/1password.pol + sudo mkdir -p /usr/share/debsig/keyrings/AC2D62742012EA22 + curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --yes --dearmor --output /usr/share/debsig/keyrings/AC2D62742012EA22/debsig.gpg + sudo apt update && sudo apt install 1password-cli + op --version + - name: Checkout branch + uses: actions/checkout@v4 + - name: Install python dependencies + run: pip install -r requirements.txt + - name: Reveal secrets + run: ./cli.py reveal_secrets -y + env: + OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.ONE_PASSWORD_SERVICE_ACCOUNT }} + - name: Run combined release notes + run: ./cli.py combined_release_notes ${{ inputs.release }} + shell: bash + - name: Make the merged version as well + run: python ./scripts/notemerge.py combined.md merged-${{ inputs.release }}.md + shell: bash + - name: Rename combined release notes file + run: mv combined.md combined-${{ inputs.release }}.md + shell: bash + - uses: actions/upload-artifact@v4 + with: + name: combined + path: | + combined-${{ inputs.release }}.md + merged-${{ inputs.release }}.md + diff --git a/.github/workflows/generate_release.yml b/.github/workflows/generate_release.yml new file mode 100644 index 00000000..d15de9cc --- /dev/null +++ b/.github/workflows/generate_release.yml @@ -0,0 +1,34 @@ +name: Generate new GitHub release + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + +jobs: + generate_release: + name: Generate release + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + DATACOVES_GITHUB_API_TOKEN: ${{ secrets.DATACOVES_GITHUB_API_TOKEN }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + 
uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Generate release + run: ./cli.py generate_release + shell: bash diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml new file mode 100644 index 00000000..d5a8e688 --- /dev/null +++ b/.github/workflows/integration_tests.yml @@ -0,0 +1,64 @@ +name: Integration Tests +on: + workflow_dispatch: + +jobs: + collect-integration-tests: # will output all integration tests under the tests dir + runs-on: ubuntu-latest + outputs: + dir: ${{ steps.collect.outputs.dir }} + steps: + - uses: actions/checkout@v2 + - id: collect + run: | + cd src/core/api/app + echo "::set-output name=dir::$(ls -d integration_tests/*/test_*.py | jq -R -s -c 'split("\n")[:-1]' | sed -e 's#integration_tests/##g')" + + integration-test: + runs-on: ubuntu-latest + needs: [collect-integration-tests] + strategy: + matrix: + dir: ${{ fromJson(needs.collect-integration-tests.outputs.dir) }} + steps: + - name: Checkout branch + uses: actions/checkout@v3 + - name: Grant permissions # Allow abc user to write into github runner's filesystem + run: | + chmod 777 src/core/api/app + chmod 777 src/core/workbench/app + chmod 777 src/core/dbt-api + mkdir src/core/api/app/integration_tests/output + chmod 777 src/core/api/app/integration_tests/output + - name: Install 1Password client + run: | + curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --yes --dearmor --output /usr/share/keyrings/1password-archive-keyring.gpg + echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/1password-archive-keyring.gpg] https://downloads.1password.com/linux/debian/amd64 stable main' | sudo tee /etc/apt/sources.list.d/1password.list + sudo mkdir -p /etc/debsig/policies/AC2D62742012EA22/ + curl -sS https://downloads.1password.com/linux/debian/debsig/1password.pol | sudo tee /etc/debsig/policies/AC2D62742012EA22/1password.pol + sudo mkdir -p /usr/share/debsig/keyrings/AC2D62742012EA22 + curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --yes --dearmor --output /usr/share/debsig/keyrings/AC2D62742012EA22/debsig.gpg + sudo apt update && sudo apt install 1password-cli + op --version + - name: Install python dependencies + run: pip install -r requirements.txt + - name: Reveal secrets + run: ./cli.py reveal_secrets -y + env: + OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.ONE_PASSWORD_SERVICE_ACCOUNT }} + - name: Deploy cluster + run: ./cli.py kind_create + - name: Add Bitnami repo explicitly + run: helm repo add bitnami https://charts.bitnami.com/bitnami + - name: Set latest release + run: ./cli.py set_latest_release datacoveslocal.com + - name: Install Datacoves + run: ./ci.py install + - name: Integration tests + run: | + ./cli.py integration_tests ${{ matrix.dir }} + - uses: actions/upload-artifact@v4 + if: ${{ always() }} + with: + name: test-output + path: src/core/api/app/integration_tests/output diff --git a/.github/workflows/integration_tests_self_hosted.yml b/.github/workflows/integration_tests_self_hosted.yml new file mode 100644 index 00000000..de40184c --- /dev/null +++ b/.github/workflows/integration_tests_self_hosted.yml @@ -0,0 +1,87 @@ +name: Integration Tests Self Hosted + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: +# schedule: +# - cron: "30 14 * * *" + workflow_dispatch: + inputs: + test_name: + description: "Test name: e.g., workbench/test_workbench_docs.py" + default: '' + required: false + type: string + +jobs: + 
create-datacoveslocal-cluster: + runs-on: [self-hosted, Linux] + steps: + - name: Create repo directory and set ownership + run: | + mkdir -p /home/datacoves/actions-runner/_work/datacoves + sudo chown -R $USER:$USER /home/datacoves/actions-runner/_work/datacoves + - name: Checkout branch + uses: actions/checkout@v3 + - name: Grant permissions # Allow abc user to write into github runner's filesystem + run: | + chmod 777 src/core/api/app + chmod 777 src/core/workbench/app + chmod 777 src/core/dbt-api + mkdir src/core/api/app/integration_tests/output + chmod 777 src/core/api/app/integration_tests/output + - name: Install 1Password client + run: | + curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --yes --dearmor --output /usr/share/keyrings/1password-archive-keyring.gpg + echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/1password-archive-keyring.gpg] https://downloads.1password.com/linux/debian/amd64 stable main' | sudo tee /etc/apt/sources.list.d/1password.list + sudo mkdir -p /etc/debsig/policies/AC2D62742012EA22/ + curl -sS https://downloads.1password.com/linux/debian/debsig/1password.pol | sudo tee /etc/debsig/policies/AC2D62742012EA22/1password.pol + sudo mkdir -p /usr/share/debsig/keyrings/AC2D62742012EA22 + curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --yes --dearmor --output /usr/share/debsig/keyrings/AC2D62742012EA22/debsig.gpg + sudo apt update && sudo apt install 1password-cli + op --version + - name: Install python dependencies + run: pip install -r requirements.txt + - name: Reveal secrets + run: ./cli.py reveal_secrets -y + env: + OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.ONE_PASSWORD_SERVICE_ACCOUNT }} + - name: Deploy cluster + run: | + if kubectl get namespaces &> /dev/null; then + echo "Cluster exists, deleting..." + ./cli.py kind_delete + echo "Creating a new cluster..." + ./cli.py kind_create + else + echo "No cluster found, creating a new one..." 
+ ./cli.py kind_create + fi + - name: Set latest release + run: ./cli.py set_latest_release datacoveslocal.com + - name: Install Datacoves + run: ./ci.py install + integration-tests: + runs-on: [self-hosted, Linux] + needs: [create-datacoveslocal-cluster] + steps: + - name: Integration tests ${{ inputs.test_name || '' }} + run: ./cli.py integration_tests ${{ inputs.test_name || '' }} + tear-down: + runs-on: [self-hosted, Linux] + needs: [integration-tests] + steps: + - name: Tear down + run: ./cli.py kind_delete + upload-artifacts: + runs-on: [self-hosted, Linux] + needs: [integration-tests] + if: ${{ always() }} + steps: + - uses: actions/upload-artifact@v4 + if: ${{ always() }} + with: + name: test-output + path: src/core/api/app/integration_tests/output diff --git a/.github/workflows/lint_pull_requests.yml b/.github/workflows/lint_pull_requests.yml new file mode 100644 index 00000000..ec2b4e89 --- /dev/null +++ b/.github/workflows/lint_pull_requests.yml @@ -0,0 +1,21 @@ +name: Lint + +on: [pull_request] + +jobs: + Lint: + name: Lint + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + + steps: + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Lint + run: flake8 + shell: bash diff --git a/.github/workflows/merge_secrets.yml b/.github/workflows/merge_secrets.yml new file mode 100644 index 00000000..6a2e1cd1 --- /dev/null +++ b/.github/workflows/merge_secrets.yml @@ -0,0 +1,35 @@ +name: Merge secrets +on: + pull_request: + types: [closed] + branches: + - main + +jobs: + merge-secrets: + runs-on: ubuntu-latest + steps: + - name: Checkout branch + uses: actions/checkout@v3 + + - name: Install 1Password client + run: | + curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --yes --dearmor --output /usr/share/keyrings/1password-archive-keyring.gpg + echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/1password-archive-keyring.gpg] https://downloads.1password.com/linux/debian/amd64 stable main' | sudo tee /etc/apt/sources.list.d/1password.list + sudo mkdir -p /etc/debsig/policies/AC2D62742012EA22/ + curl -sS https://downloads.1password.com/linux/debian/debsig/1password.pol | sudo tee /etc/debsig/policies/AC2D62742012EA22/1password.pol + sudo mkdir -p /usr/share/debsig/keyrings/AC2D62742012EA22 + curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --yes --dearmor --output /usr/share/debsig/keyrings/AC2D62742012EA22/debsig.gpg + sudo apt update && sudo apt install 1password-cli + op --version + + - name: Install python dependencies + run: pip install -r requirements.txt + + - name: Merge secrets + run: | + echo "Merging from branch [$SOURCE_BRANCH]" + ./cli.py merge_secrets $SOURCE_BRANCH -y + env: + OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.ONE_PASSWORD_SERVICE_ACCOUNT }} + SOURCE_BRANCH: ${{ github.event.pull_request.head.ref }} diff --git a/.github/workflows/publish_extensible_images.yml b/.github/workflows/publish_extensible_images.yml new file mode 100644 index 00000000..0e334b94 --- /dev/null +++ b/.github/workflows/publish_extensible_images.yml @@ -0,0 +1,47 @@ +name: Publish extensible images + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + release: + types: published + +jobs: + publish_extensible_images: + name: Publish extensible images + runs-on: ubuntu-latest + + container: datacoves/ci-multiarch:latest + strategy: + matrix: + group: + - name: "ci-basic" + is_local: "False" + - 
name: "ci-airflow" + is_local: "False" + - name: "airflow-airflow" + is_local: "False" + - name: "airflow-airflow" + is_local: "True" + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + DATACOVES_GITHUB_API_TOKEN: ${{ secrets.DATACOVES_GITHUB_API_TOKEN }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Publish extensible images + run: ./cli.py publish_extensible_images ${{ matrix.group.name }} ${{ matrix.group.is_local }} + shell: bash diff --git a/.github/workflows/publish_release_assets.yml b/.github/workflows/publish_release_assets.yml new file mode 100644 index 00000000..a76ce37f --- /dev/null +++ b/.github/workflows/publish_release_assets.yml @@ -0,0 +1,89 @@ +name: Publish Release Assets + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + inputs: + release: + description: "Release name, starting with 'v'" + default: '' + required: false + type: string + release: + types: published + +jobs: + create-datacoveslocal-cluster: + runs-on: [self-hosted, Linux] + steps: + - name: Create repo directory and set ownership + run: | + mkdir -p /home/datacoves/actions-runner/_work/datacoves + sudo chown -R $USER:$USER /home/datacoves/actions-runner/_work/datacoves + - name: Checkout branch + uses: actions/checkout@v3 + with: + ref: ${{ inputs.release || github.event.pull_request.head.sha }} + - name: Grant permissions # Allow abc user to write into github runner's filesystem + run: | + chmod 777 src/core/api/app + chmod 777 src/core/workbench/app + chmod 777 src/core/dbt-api + mkdir src/core/api/app/integration_tests/output + chmod 777 src/core/api/app/integration_tests/output + - name: Install 1Password client + run: | + curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --yes --dearmor --output /usr/share/keyrings/1password-archive-keyring.gpg + echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/1password-archive-keyring.gpg] https://downloads.1password.com/linux/debian/amd64 stable main' | sudo tee /etc/apt/sources.list.d/1password.list + sudo mkdir -p /etc/debsig/policies/AC2D62742012EA22/ + curl -sS https://downloads.1password.com/linux/debian/debsig/1password.pol | sudo tee /etc/debsig/policies/AC2D62742012EA22/1password.pol + sudo mkdir -p /usr/share/debsig/keyrings/AC2D62742012EA22 + curl -sS https://downloads.1password.com/linux/keys/1password.asc | sudo gpg --yes --dearmor --output /usr/share/debsig/keyrings/AC2D62742012EA22/debsig.gpg + sudo apt update && sudo apt install 1password-cli + op --version + - name: Install python dependencies + run: pip install -r requirements.txt + - name: Reveal secrets + run: ./cli.py reveal_secrets -y + env: + OP_SERVICE_ACCOUNT_TOKEN: ${{ secrets.ONE_PASSWORD_SERVICE_ACCOUNT }} + - name: Deploy cluster + run: | + if kubectl get namespaces &> /dev/null; then + echo "Cluster exists, deleting..." + ./cli.py kind_delete + echo "Creating a new cluster..." + ./cli.py kind_create + else + echo "No cluster found, creating a new one..." 
+ ./cli.py kind_create + fi + - name: Set latest release + run: ./cli.py set_latest_release datacoveslocal.com + - name: Install Datacoves + run: ./ci.py install + + publish-release-assets: + name: Publish Release Assets + runs-on: [self-hosted, Linux] + needs: [create-datacoveslocal-cluster] + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} + + steps: + - name: Publish assets to S3 + run: ./cli.py build_and_deploy_static_files ${{ inputs.release || github.ref }} + + tear-down: + runs-on: [self-hosted, Linux] + needs: [publish-release-assets] + steps: + - name: Tear down + run: ./cli.py kind_delete diff --git a/.github/workflows/rebuild_on_major_version_bump.yml b/.github/workflows/rebuild_on_major_version_bump.yml new file mode 100644 index 00000000..c0a5869e --- /dev/null +++ b/.github/workflows/rebuild_on_major_version_bump.yml @@ -0,0 +1,59 @@ +name: Rebuild all on major version bump + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + workflow_dispatch: + inputs: + force: + description: 'manually force build' + type: boolean + default: false + push: + branches: + - main + paths: + - .version.yml + +jobs: + collect-images-to-rebuild: + name: Collect images to rebuild + container: datacoves/ci-multiarch:latest + + runs-on: [self-hosted, Linux] + outputs: + images: ${{ steps.collect.outputs.images }} + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + - uses: actions/checkout@v3 + with: + clean: false + fetch-depth: 2 + - id: collect + run: ./cli.py ci_rebuild_on_major_bump ${{ github.event.inputs.force }} + image-rebuild: + name: Build and deploy image + container: datacoves/ci-multiarch:latest + + runs-on: [self-hosted, Linux] + needs: [collect-images-to-rebuild] + strategy: + matrix: + images: ${{ fromJson(needs.collect-images-to-rebuild.outputs.images) }} + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Trust directory + run: git config --global --add safe.directory /__w/datacoves/datacoves + + - name: Checkout branch + uses: actions/checkout@v3 + + - name: Build and deploy ${{ matrix.images.image }} + run: ./cli.py ci_build_and_push ${{ matrix.images.image }} ${{ matrix.images.repo }} ${{ matrix.images.profile }} + shell: bash diff --git a/.github/workflows/test_core-api.yml b/.github/workflows/test_core-api.yml new file mode 100644 index 00000000..6a6d2faa --- /dev/null +++ b/.github/workflows/test_core-api.yml @@ -0,0 +1,39 @@ +name: Unit test core-api + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +on: + pull_request: + paths: + - src/core/api/* + - src/core/api/**/* + - "!src/core/api/app/integration_tests/**/*" + +jobs: + test_core-api: + name: Test core-api + runs-on: ubuntu-latest + + container: datacovesprivate/core-api-local:latest + + env: + DOCKER_TLS_CERTDIR: "/certs" + DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} + + steps: + - name: Checkout branch + uses: actions/checkout@v3 + with: + fetch-depth: 2 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Install requirements + run: | + pip install --no-input -r src/core/api/requirements.txt + + - name: Django test on core-api + run: | + cd src/core/api/app/ + ./manage.py test diff --git a/.gitignore b/.gitignore index bc7f66ce..7ff5ea89 100644 --- a/.gitignore +++ b/.gitignore @@ 
-9,6 +9,7 @@ /venv/* /output/* */__pycache__/* +__pycache__/ # exceptions !.gitkeep diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..f0eead16 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,46 @@ +repos: + - repo: local + hooks: + # Format. + - id: black + name: black + entry: black + language: python + language_version: python3 + additional_dependencies: [black] + minimum_pre_commit_version: 2.9.2 + require_serial: true + types_or: [cython, pyi, python] + - id: isort + name: isort + entry: isort + require_serial: true + language: python + language_version: python3 + additional_dependencies: [isort] + types_or: [cython, pyi, python] + args: ["--profile", "black", "./src/core", "./scripts", "./cli.py"] + # Lint + - id: linter + name: linter + entry: flake8 + language: python + language_version: python3 + additional_dependencies: [flake8] + types_or: [cython, pyi, python] + - id: dockerfile-labels-check + name: Dockerfile Labels Validator + entry: python3 ./scripts/pre-commit-hooks/validate_docker_labels.py + language: system + pass_filenames: false + - repo: https://github.com/gitguardian/ggshield + rev: v1.17.2 + hooks: + - id: ggshield + language_version: python3 + stages: [commit] + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 # Use the ref you want to point at + hooks: + - id: debug-statements + diff --git a/.version.yml b/.version.yml new file mode 100644 index 00000000..6a20eb87 --- /dev/null +++ b/.version.yml @@ -0,0 +1,2 @@ +# See docs/how-tos/datacoves-versioning.md +version: "4.0" diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..964c1427 --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,7 @@ +{ + "git.ignoreLimitWarning": true, + "[python]": { + "editor.defaultFormatter": "ms-python.black-formatter" + }, + "typescript.tsdk": "src/core/workbench/app/node_modules/typescript/lib" +} \ No newline at end of file diff --git a/README.md b/README.md index 3c61357a..649e736d 100644 --- a/README.md +++ b/README.md @@ -135,4 +135,4 @@ This should be like this instead: ### Tabs Functionality -Please see the [docsify-tabs](https://jhildenbiddle.github.io/docsify-tabs/#/) \ No newline at end of file +Please see the [docsify-tabs](https://jhildenbiddle.github.io/docsify-tabs/#/) diff --git a/ci.py b/ci.py new file mode 100755 index 00000000..a200b1b8 --- /dev/null +++ b/ci.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 + +import os +import sys + +from lib import cmd +from scripts import installer, k8s_utils, setup_core, setup_operator + +CLUSTER_DOMAIN = "datacoveslocal.com" + + +def install(): + """Install datacoves in a cluster.""" + os.environ["CI_RUNNING"] = "true" + k8s_utils.set_context("kind-datacoves-cluster") + setup_operator.setup_operator(CLUSTER_DOMAIN) + setup_core.setup_core(CLUSTER_DOMAIN) + installer.retry_helm_charts(prompt=False) + + +if __name__ == "__main__": + program_name, *args = sys.argv + + # Run from the directory that contains this script. 
+ os.chdir(os.path.dirname(program_name)) + cmd.main() diff --git a/cli.py b/cli.py new file mode 100755 index 00000000..20150763 --- /dev/null +++ b/cli.py @@ -0,0 +1,931 @@ +#!/usr/bin/env python3 + +import json +import os +import pathlib +import sys +from os import listdir +from pathlib import Path + +import questionary +import yaml +from questionary.prompts.common import Choice + +from lib import argument_parsing_utils as arg_parse +from lib import cmd, config_files +from scripts import docker_images, helm_utils, installer, k8s_utils +from scripts import setup_base as base +from scripts import setup_core as core +from scripts import setup_operator as operator +from scripts import ( + setup_secrets, + stripe_copy, + stripe_utils, + translators, + versions, + volumes, +) +from scripts.console import print_logo +from scripts.dump_database import dump_database +from scripts.github import Releaser, get_prs_with_label +from scripts.releases import generate_release_name, get_latest_release + +## Project setup and dev tools + + +def init_dev(): + """Run the initial project setup (installs python dependencies with pip3 --user).""" + cmd.run("pre-commit install") + setup_local_ssl_certificate() + + +def setup_local_ssl_certificate(): + """Generate and install a local SSL certificate for datacoveslocal.com using mkcert.""" + domain = "datacoveslocal.com" + caroot = "./.mkcert" + os.environ["CAROOT"] = caroot + if Path(caroot).exists(): + print( + f"Local SSL cert found at {caroot}. To setup a new one delete the folder and rerun." + ) + return + cmd.run("mkcert -install") + cmd.run(f"mkcert {domain} *.{domain}") + cmd.run(f"mv datacoveslocal.com+1.pem config/{domain}/base/local-cert.cer") + cmd.run(f"mv datacoveslocal.com+1-key.pem config/{domain}/base/local-cert.key") + + +def brew_install_deps(): + """Install some development dependencies on osx with brew.""" + cmd.run("brew install gnupg kind git-secret mkcert nss go helm dnsmasq") + cmd.run("helm repo add bitnami https://charts.bitnami.com/bitnami") + cmd.run("brew install --cask 1password/tap/1password-cli") + print("Ensure that kind >= 0.11.1 is installed:") + cmd.run("kind --version") + print("Ensure that kubectl >= 1.24 is installed:") + cmd.run("kubectl version --client=true") + cmd.sh("mkdir -pv $(brew --prefix)/etc/") + cmd.sh( + "grep '.datacoveslocal.com' $(brew --prefix)/etc/dnsmasq.conf || " + "echo 'address=/.datacoveslocal.com/127.0.0.1' >> $(brew --prefix)/etc/dnsmasq.conf" + ) + cmd.sh( + "grep 'port=53' $(brew --prefix)/etc/dnsmasq.conf || " + "echo 'port=53' >> $(brew --prefix)/etc/dnsmasq.conf" + ) + cmd.run("sudo brew services start dnsmasq") + cmd.run("sudo mkdir -pv /etc/resolver") + cmd.sh( + "grep 'nameserver 127.0.0.1' /etc/resolver/datacoveslocal.com || " + "sudo bash -c 'echo \"nameserver 127.0.0.1\" > /etc/resolver/datacoveslocal.com'" + ) + init_dev() + + +def linux_install_deps(): + """Install some development dependencies on linux.""" + # This directory also needs to exist + GENERATED_PATH = Path("./.generated") + + if not GENERATED_PATH.exists(): + GENERATED_PATH.mkdir(parents=True) + + # Curl is not installed by default on all Linux distros, and many things + # here need it, so let's make sure we have it. + cmd.run("sudo apt -y install curl mkcert") + + # Stephen had an issue where his out of the box distribution generated + # "too many open files" errors pretty quickly. This will ensure there + # are enough file handles. 
+ cmd.run("sudo sysctl fs.file-max=1000000") + cmd.run("sudo sysctl fs.inotify.max_user_watches=524288") + cmd.run("sudo sysctl fs.inotify.max_user_instances=512") + + cmd.sh( + "grep -qxF 'fs.file-max = 1000000' /etc/sysctl.conf || " + "sudo bash -c \"echo 'fs.file-max = 1000000' >> /etc/sysctl.conf\"" + ) + cmd.sh( + "grep -qxF 'fs.inotify.max_user_watches = 524288' /etc/sysctl.conf || " + "sudo bash -c \"echo 'fs.inotify.max_user_watches = 524288' >> /etc/sysctl.conf\"" + ) + cmd.sh( + "grep -qxF 'fs.inotify.max_user_instances = 512' /etc/sysctl.conf || " + "sudo bash -c \"echo 'fs.inotify.max_user_instances = 512' >> /etc/sysctl.conf\"" + ) + + if not os.path.exists("/etc/apt/sources.list.d/helm-stable-debian.list"): + cmd.run("bash scripts/shell/helm_repo.sh") + + cmd.run( + "sudo apt -y install gnupg git-secret golang helm libnss3-tools " + "build-essential dnsmasq docker.io" + ) + + # Add our user to Docker + cmd.sh("sudo adduser $USER docker") + + # Install 1Password + cmd.sh( + "curl -sS https://downloads.1password.com/linux/keys/1password.asc | \ + sudo gpg --dearmor --output /usr/share/keyrings/1password-archive-keyring.gpg" + ) + cmd.sh( + 'echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/1password-archive-keyring.gpg] \ + https://downloads.1password.com/linux/debian/$(dpkg --print-architecture) stable main" |' + "sudo tee /etc/apt/sources.list.d/1password.list" + ) + cmd.run("sudo mkdir -p /etc/debsig/policies/AC2D62742012EA22/") + cmd.sh( + "curl -sS https://downloads.1password.com/linux/debian/debsig/1password.pol | \ + sudo tee /etc/debsig/policies/AC2D62742012EA22/1password.pol" + ) + cmd.run("sudo mkdir -p /usr/share/debsig/keyrings/AC2D62742012EA22") + cmd.sh( + "curl -sS https://downloads.1password.com/linux/keys/1password.asc | \ + sudo gpg --dearmor --output /usr/share/debsig/keyrings/AC2D62742012EA22/debsig.gpg" + ) + cmd.sh("sudo apt update && sudo apt install 1password-cli") + + # This weird bash inside of bash thing is needed because cmd.sh uses + # /bin/sh instead of /bin/bash which doesn't support the <( syntax. + # + # We could probaly twist this around to use cmd.run instead, but for + # a one-off thing let's just go with simple if a little janky looking. + cmd.sh( + "bash -c 'bash <(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)'" + ) + + cmd.sh( + f"grep linuxbrew '{pathlib.Path.home()}/.bashrc' || " + "(echo; echo 'eval \"$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)\"') " + f">> '{pathlib.Path.home()}/.bashrc'" + ) + + cmd.sh( + 'bash -c \'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)" && ' + "ulimit -n 1000000 && " + "brew install gnupg kind git-secret nss go helm kubectl k9s mkcert'" + ) + + # This repo is supposed to be added somewhere under setup_core, + # and it looks like it is there, but some kind of order of operations + # problem is making it not show up. It's easier to add it here. + cmd.sh( + 'bash -c \'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)" && ' + "helm repo add bitnami https://charts.bitnami.com/bitnami'" + ) + + print( + "If you are using a shell other than BASH, you will need to add " + "HomeBrew to your path. See the end of your " + f"{pathlib.Path.home()}/.bashrc file for details." 
+ ) + + print("Ensure that kind >= 0.11.1 is installed:") + cmd.sh( + 'bash -c \'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)" && ' + "kind --version'" + ) + print("Ensure that kubectl >= 1.24 is installed:") + cmd.sh( + 'bash -c \'eval "$(/home/linuxbrew/.linuxbrew/bin/brew shellenv)" && ' + "kubectl version --client=true'" + ) + + # We can't run init_dev without adding the linuxbrew stuff to our PATH. + # init_dev is shared by Mac so we can't modify it to be linux-specific. + print( + "Please close this shell and open a fresh one for Linux Brew to work. " + "Linux Brew's path has to be set up before the Python virtual " + "environment or it will make errors." + ) + print("") + print("Then, in a fresh shell, start a virtual environment and:") + print("") + print("./cli.py init_dev") + + +def lint(): + """Run the linter.""" + cmd.run("pre-commit run black") + + +def format(): + """Run the autoformatter.""" + cmd.run("pre-commit run black") + cmd.run("pre-commit run isort") + + +def run_pre_commit_hook(hook_id): + """Run a pre-commit hook without invoking pre-commit.""" + pcc = config_files.load_yaml(".pre-commit-config.yaml") + for hook in pcc["repos"][0]["hooks"]: + if hook["id"] == hook_id: + cmd.run(hook["entry"], *hook.get("args", [])) + + +## Docker images + + +def images(with_extra_tags=False): + """List a cluster's docker images.""" + print_logo() + cluster_domain = choose_cluster() + images = docker_images.cluster_images(cluster_domain) + if with_extra_tags: + images = images.union(docker_images.get_extra_tags(images)) + return images + + +def release_images(): + """List a release's docker images.""" + release = choose_release(include_pres=True) + return docker_images.release_images(release) + + +def deploy_images(): + """Pull docker images from datacoves docker hub repo, retags them and pushes them to the cluster repo configured in {cluster_domain}.""" + print_logo() + cluster_domain = choose_cluster() + docker_images.deploy_images(cluster_domain) + + +def ci_rebuild_on_major_bump(force=False): + """ + Prints the list of images to rebuild, all of them if major version was bumped, none when not + """ + + prev_version = yaml.safe_load(cmd.output("git show HEAD~1:.version.yml"))["version"] + version = config_files.load_yaml(".version.yml")["version"] + if force or int(prev_version.split(".")[0]) != int(version.split(".")[0]): + images = docker_images.get_all_images_build_args() + else: + images = [] + print(f"::set-output name=images::{json.dumps(images)}") + + +def ci_build(image_path, repo="datacovesprivate", target=None): + """Build and tag a docker image.""" + image_path = arg_parse.parse_image_path(image_path) + docker_images.ci_build(image_path, repo, target) + + +def ci_build_and_push(image_path, repo="datacovesprivate", target=None): + """Build, tag and push a docker image.""" + print_logo() + image_path = arg_parse.parse_image_path(image_path) + is_main = setup_secrets.get_ticket_number_by_git_branch(prompt=False) is None + docker_images.build_and_push(image_path, repo, target, gen_latest=is_main) + + +def build_and_push(image_path, repo="datacovesprivate", target=None): + """Build, tag and push a docker image.""" + print_logo() + image_path = arg_parse.parse_image_path(image_path) + ticket = setup_secrets.get_ticket_number_by_git_branch(prompt=False) + if ticket: + if not target and docker_images.requires_target(image_path): + target = questionary.select( + "Choose target", choices=docker_images.VALID_PROFILES + ).ask() + docker_images.build_and_push( + image_path, 
+ repo, + target, + custom_tag=f"pre-{ticket}", + ) + else: + print("No ticket provided") + + +def gc_images(docker_registry, cluster_domain_suffix_mask=".jnj.com", repo=""): + """Garbage collect unused docker images from a docker registry.""" + print( + """Please use your browser's devtools to borrow a docker hub token: + 1. Login to hub.docker.com. + 2. Open the devtools and look for an API call. + 3. Copy the token from the requests header that looks like "Authorization: Bearer {token}" + """ + ) + token = input("token: ") + docker_images.gc_images(docker_registry, token, cluster_domain_suffix_mask, repo) + + +def gc_images_dry_run(docker_registry, cluster_domain_suffix_mask=".jnj.com", repo=""): + """List unused docker images that would be garbage collected from a registry.""" + docker_images.gc_images( + docker_registry, "", cluster_domain_suffix_mask, repo, dry_run=True + ) + + +def active_image_tags(cluster_domain_suffix_mask=".jnj.com", repo=""): + """List active docker images (in use by an active release)""" + docker_images.active_image_tags(cluster_domain_suffix_mask, repo) + + +def generate_release(): + """Generates a new release file under releases.""" + versions.generate_release() + + +def generate_hotfix(): + """Generates a new hotfix based on the current branch and a previous + release""" + + print("Your GIT repository is in the following state:") + print(" ") + cmd.sh("git status") + print(" ") + + print( + "You should be on a hotfix branch and your changes should be " + "checked in with no extra files laying around. The release you " + "are hotfixing should also be downloaded in your 'releases' " + "directory." + ) + + if not questionary.confirm("Are you ready to continue?").ask(): + print("Try again later!") + return + + print("Select a release to build a hotfix from:") + release = choose_release(include_pres=False) + + print( + "Choose the images to rebuild for your hotfix. We will use the " + "images from the chosen release and only replace the images you " + "select with builds from this branch." + ) + + (buildable, other) = docker_images.replacable_images(release) + + to_rebuild = questionary.checkbox( + "Choose images to build and replace, if any", choices=buildable + ).ask() + + to_change = questionary.checkbox( + "Choose images to change the version of, if any", choices=other + ).ask() + + # Map image name to new version + change_versions = {} + + for change in to_change: + new_version = input(f"Enter a new version for {change} or hit enter to skip:") + + if new_version: + change_versions[change] = new_version + + # Build what we're building + if to_rebuild: + change_versions.update(docker_images.build_and_push_images(to_rebuild)) + + # Make a release that is the original release with the changes. + new_release, timestamp, ticket = generate_release_name(False) + + docker_images.make_new_release_from_old( + release, new_release, cmd.output("git rev-parse HEAD").strip(), change_versions + ) + + print( + f"Release {new_release} successfully generated and uploaded to GitHub. Please review it and publish it." + ) + + +def publish_extensible_images(group=None, local_version=""): + """ + Publish major, major.minor and latest versions for extensible images + group: one of docker_images.EXTENSIBLE_IMAGES + local_version: 'True' for i.e. 'airflow-local' or 'False' for normal. 
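+ Example invocation (group name is illustrative): ./cli.py publish_extensible_images airflow False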
+ """ + if not group: + print(f"Please provide one of the following: {docker_images.EXTENSIBLE_IMAGES}") + return + + if not local_version: + print( + "Please provide 'True' for local versions such as 'airflow-local' or 'False' for normal versions." + ) + + docker_images.publish_extensible_images(group, local_version) + + +def set_latest_release(cluster_domain: str = ""): + """Set the newest releases/ file into set_release""" + latest_release = get_latest_release() + set_release(cluster_domain, latest_release, prompt=False) + + +def set_release(cluster_domain: str = "", release: str = "", prompt=True): + """Set the release on cluster configuration submodules.""" + cluster_domains = [cluster_domain] if cluster_domain else choose_clusters() + if prompt: + cleanup = questionary.confirm("Cleanup local releases?").ask() + else: + cleanup = False + Releaser().download_releases(cleanup=cleanup) + release = release or choose_release( + include_pres="datacoveslocal.com" in cluster_domains + ) + for cluster_domain in cluster_domains: + cluster_domain = arg_parse.parse_cluster_domain(cluster_domain) + versions.update_config_release(cluster_domain, release) + print(f"Cluster {cluster_domain} updated successfully [release=>{release}].") + + +def combined_release_notes(from_release: str = None): + """Creates a combined version of multiple release notes""" + cluster_domain = None + if from_release: + Releaser().download_releases() + else: + cluster_domain = choose_cluster() + + versions.combined_release_notes( + cluster_domain=cluster_domain, from_release=from_release + ) + + +def special_release_steps(from_release: str, to_release: str): + """Lists special release step labeled PR's from the indicated releases.""" + + if not from_release or not to_release: + print("Please provide both from and to releases") + return + + issues = get_prs_with_label(from_release, to_release, "special release step") + + for issue in issues: + print(issue.title) + print(f"https://github.com/datacoves/datacoves/pull/{issue.number}") + print("") + + print("(Done)") + + +## Kubernetes + + +def setup_base(domain=None): + """Setup the cluster base dependencies required by datacoves.""" + print_logo() + cluster_domain = choose_cluster(domain) + base.setup_base(cluster_domain) + + +def download_releases(): + """Downloads latest releases from GitHub""" + print_logo() + include_drafts = questionary.confirm("Include draft releases?").ask() + cleanup = questionary.confirm("Cleanup local releases?").ask() + Releaser().download_releases(include_drafts=include_drafts, cleanup=cleanup) + if include_drafts: + print( + "\nWARNING: Downloaded draft releases could be removed locally next time " + "you re-run this command and decide not to download them.\n" + ) + + +def install(domain=None, automatic=None): + """Install datacoves in a cluster.""" + print_logo() + + cluster_domain = choose_cluster(domain) + + installer.install_datacoves(cluster_domain, automatic == "COMPLY") + + +def choose_cluster(cluster_domain=None): + config_path = "./config" + + choices = [f for f in listdir(config_path) if Path(f"{config_path}/{f}").is_dir()] + default = "datacoveslocal.com" if "datacoveslocal.com" in choices else None + + if cluster_domain: + if cluster_domain not in choices: + print(f"Invalid domain {cluster_domain} selected by parameter") + exit() + + else: + cluster_domain = questionary.select( + "Choose cluster", choices=choices, default=default + ).ask() + + if not cluster_domain: + print("No cluster selected") + exit() + + cluster_yaml = 
config_files.load_file( + f"config/{cluster_domain}/cluster-params.yaml" + ) + + context = cluster_yaml["context"] + k8s_utils.set_context(context) + return cluster_domain + + +def choose_clusters(): + config_path = "./config" + + choices = [ + Choice(f, checked=False) + for f in listdir(config_path) + if Path(f"{config_path}/{f}").is_dir() + ] + + cluster_domains = questionary.checkbox("Choose clusters", choices=choices).ask() + + if not cluster_domains: + print("No cluster selected") + exit() + + return cluster_domains + + +def choose_release(include_pres=False, limit=6): + releases_path = "./releases" + + releases = [ + f.replace(".yaml", "") + for f in sorted(listdir(releases_path)) + if Path(f"{releases_path}/{f}").is_file() + ] + pres = [] + regular = [] + for release in releases: + if release.startswith("pre"): + pres.append(release) + else: + regular.append(release) + + choices = regular[-limit:] + (pres[-limit:] if include_pres else []) + + release = questionary.select( + "Choose release", choices=choices, default=choices[-1] + ).ask() + + if not release: + print("No release selected") + exit() + return release + + +def bundle_installer(*cluster_domains): + """Creates a tar bundle with everything needed to install datacoves on a cluster""" + cluster_domains = list(map(arg_parse.parse_cluster_domain, cluster_domains)) + installer.bundle_installer(*cluster_domains) + + +def rsync_installer(destination, client="jnj"): + """Rsync the installer's files to a datacoves mirror.""" + installer.rsync_to_client_mirror(destination, client) + + +def gen_core(cluster_domain="datacoveslocal.com"): + """Run gen_core.py from cluster.""" + cluster_domain = arg_parse.parse_cluster_domain(cluster_domain) + core.gen_core(cluster_domain) + + +def setup_core(ctx="kind-datacoves-cluster", cluster_domain="datacoveslocal.com"): + """Setup the core namespace in the cluster.""" + cluster_domain = arg_parse.parse_cluster_domain(cluster_domain) + k8s_utils.set_context(ctx) + core.setup_core(cluster_domain) + + +def install_crds(ctx="kind-datacoves-cluster"): + """Install the datacoves custom resource definitions to a cluster.""" + k8s_utils.set_context(ctx) + operator.install_crds() + + +def gen_operator(cluster_domain="datacoveslocal.com"): + """Run gen_operator.py from cluster.""" + cluster_domain = arg_parse.parse_cluster_domain(cluster_domain) + operator.gen_operator(cluster_domain) + + +def setup_operator(ctx="kind-datacoves-cluster", cluster_domain="datacoveslocal.com"): + """Setup the operator's namespace in the cluster.""" + cluster_domain = arg_parse.parse_cluster_domain(cluster_domain) + k8s_utils.set_context(ctx) + operator.setup_operator(cluster_domain) + + +def pause_operator(ctx="kind-datacoves-cluster"): + """Scale the operator deployment to 0 replicas.""" + k8s_utils.set_context(ctx) + operator.scale_operator(replicas=0) + + +def resume_operator(ctx="kind-datacoves-cluster"): + """Scale the operator deployment to 1 replica.""" + k8s_utils.set_context(ctx) + operator.scale_operator(replicas=1) + + +def run_operator(): + """Run the operator locally, outside of the cluster.""" + ctx = "kind-datacoves-cluster" + cluster_domain = "datacoveslocal.com" + cmd.run(f"kubectl config use-context {ctx}") + k8s_utils.set_context(ctx) + operator.scale_operator(replicas=0) + operator.run_operator(cluster_domain) + + +def pod_sh( + app=core.DatacovesCoreK8sName.API.value, ns="core", ctx="kind-datacoves-cluster" +): + """Kubectl exec wrapper to enter a pod for a deployment.""" + k8s_utils.set_context(ctx) + pod 
= k8s_utils.pod_for_deployment(ns, app) + run = k8s_utils.cmd_runner_in_pod( + ns, + pod, + container=( + core.DatacovesCoreK8sName.API.value + if app == core.DatacovesCoreK8sName.API.value + else "" + ), + ) + shells = ("bash", "ash", "sh") + if app == core.DatacovesCoreK8sName.API.value: + shells = ("bash",) + elif app == core.DatacovesCoreK8sName.WORKBENCH.value: + shells = ("ash",) + elif app == core.DatacovesCoreK8sName.DBT_API.value: + shells = ("sh",) + for sh in shells: + try: + run(sh) + return + except Exception: + # BUG: When starting the shell succeeds but the last command run within + # fails, the shell and kubectl propagate the exit code and we can't tell + # the difference from when the shell failed to start, so we try the next + # shell (and you have to hit ^D twice to exit). + pass + + +## Kind + + +def kind_create(): + """Create the local kind cluster and apply the global configuration.""" + setup_local_ssl_certificate() + cluster_domain = "datacoveslocal.com" + ctx = "kind-datacoves-cluster" + image = "kindest/node:v1.31.1" + config = f"config/{cluster_domain}/kind/kind-cluster.yaml" + cmd.run(f"kind create cluster --image {image} --config {config}") + k8s_utils.set_context(ctx) + base.setup_base(cluster_domain) + + +def kind_delete(): + """Delete the local kind cluster.""" + ctx = "kind-datacoves-cluster" + k8s_utils.set_context(ctx) + cmd.run("kind delete cluster --name datacoves-cluster") + + +def kind_build_and_load(image_path): + """Builds and loads a docker image to kind""" + image = image_path.replace("/", "-") + version = "temp" + docker_images.ci_build(image_path, version=version) + cmd.run(f"kind load docker-image {image}:{version} --name datacoves-cluster") + + +def kind_registry_create(): + """Run a local docker registry called kind-registry on localhost:5000.""" + running = False + try: + o = cmd.output("docker inspect -f {{.State.Running}} kind-registry") + running = o.strip() == "true" + except Exception: + pass + + if running: + print("kind-registry already running") + return + + cmd.run( + "docker run -d --restart=always -p 127.0.0.1:5000:5000 --name kind-registry registry:2" + ) + cmd.run("docker network connect kind kind-registry") + + +def kind_registry_delete(): + cmd.run("docker stop kind-registry") + cmd.run("docker rm kind-registry") + + +## Misc dev utils + + +def build_and_deploy_static_files(release: str): + """Build and deploy static files to S3. Requires running cluster.""" + k8s_utils.set_context("kind-datacoves-cluster") + core.build_and_deploy_static_files(release) + + +def unit_tests(): + """Run the tests""" + cmd.sh("python -m unittest discover -s . -p '*_test.py' -v ./lib") + cmd.sh("python -m unittest discover -s . 
-p '*_test.py' -v ./scripts") + k8s_utils.set_context("kind-datacoves-cluster") + core.run_unit_tests("datacoveslocal.com") + + +def integration_tests(single_test=""): + k8s_utils.set_context("kind-datacoves-cluster") + core.run_integration_tests("datacoveslocal.com", single_test) + + +def stripe_webhooks(): + """Runs core api locally using port-forward to expose the service to stripe, requires `brew install stripe/stripe-cli/stripe`.""" + print( + "\n Please run on a new terminal: `./cli.py stripe_listen` to start forwarding requests\n\n" + " Check that the webhook signing secret displayed is the same as $STRIPE_WEBHOOK_SECRET\n" + " Learn more at https://stripe.com/docs/stripe-cli\n" + ) + k8s_utils.set_context("kind-datacoves-cluster") + pod = k8s_utils.pod_for_deployment("core", core.DatacovesCoreK8sName.API.value) + k8s_utils.kubectl(f"-n core port-forward {pod} 8000") + + +def stripe_listen(): + """Runs stripe listen using datacoveslocal.com STRIPE_API_KEY""" + # stripe login --api-key $STRIPE_API_KEY` + path = Path("config") / "datacoveslocal.com" / "secrets" / "core-api.env" + api_key = config_files.load_file(path)["STRIPE_API_KEY"] + assert api_key.startswith("sk_test_") + cmd.run( + f"stripe listen --api-key={api_key} --forward-to=localhost:8000/api/billing/stripe" + ) + + +def get_helm_release_revisions(ns, release_name, ctx="kind-datacoves-cluster"): + """List helm release revisions.""" + k8s_utils.set_context(ctx) + return helm_utils.get_helm_release_revisions(ns, release_name) + + +def gc_helm_release(ns, release_name, ctx="kind-datacoves-cluster"): + """Delete past helm release state secrets.""" + k8s_utils.set_context(ctx) + helm_utils.gc_helm_release(ns, release_name) + + +def retry_helm_charts(): + """Retry helm charts that are in a pending state.""" + print_logo() + choose_cluster() + installer.retry_helm_charts(include_pending=True) + + +def nuke_helm_release(ns, release_name, ctx="kind-datacoves-cluster"): + k8s_utils.set_context(ctx) + helm_utils.nuke_helm_release(ns, release_name) + + +def dockerfile_to_python(path): + """Convert a dockerfile to the python source code that would generate it.""" + translators.dockerfile_to_python(path) + + +def idp_callback_url(cluster_domain): + """The callback url that identity providers need to accept.""" + cluster_domain = arg_parse.parse_cluster_domain(cluster_domain) + backend = "ping_federate" if cluster_domain.endswith(".jnj.com") else "auth0" + print(f"https://api.{cluster_domain}/complete/{backend}") + + +def idp_signoff_urls(cluster_domain, *envs): + """The list of signoff allowed redirect urls that identity providers need to accept.""" + cluster_domain = arg_parse.parse_cluster_domain(cluster_domain) + urls = [] + for env in envs: + urls += [f"https://{env}.{cluster_domain}"] + return urls + + +def pull_submodules(): + """git pull each submodule.""" + cmd.run("git submodule foreach", "git pull") + + +def clean_submodules(): + """git clean and reveal git secrets for each submodule.""" + cmd.run("git submodule foreach", "git clean -fdx && git secret reveal -f") + + +def get_pvs(ctx="kind-datacoves-cluster"): + k8s_utils.set_context(ctx) + for pv in volumes.get_pvs(): + print(pv) + + +def delete_released_pvs(ctx="kind-datacoves-cluster"): + k8s_utils.set_context(ctx) + volumes.delete_released_pvs() + + +def download_pricing_model(): + """Downloads the pricing model from stripe.""" + cluster_domain = choose_cluster() + stripe_utils.download_pricing_model(cluster_domain) + + +def pricing_model(): + """Shows the pricing model 
downloaded from stripe.""" + cluster_domain = choose_cluster() + return stripe_utils.pricing_model(cluster_domain) + + +def copy_to_stripe_test(): + """Copy products and prices from selected cluster's stripe account into + datacoveslocal.com associated stripe account""" + cluster_domain = choose_cluster() + return stripe_copy.copy_to_test(cluster_domain) + + +def reveal_secrets(auto_confirm: str = "-n"): + """Command to reveal secrets""" + require_user_confirm = auto_confirm != "-y" + setup_secrets.reveal_secrets(prompt=require_user_confirm) + + +def sync_secrets(auto_confirm: str = "-n"): + """Command to create or update secrets""" + if auto_confirm == "-y" or questionary.confirm("Do you want sync secrets?").ask(): + setup_secrets.sync_secrets() + + +def merge_secrets(branch_to_merge: str, auto_confirm: str = "-n"): + """Command to merge secrets""" + if auto_confirm == "-y" or questionary.confirm("Do you want merge secrets?").ask(): + setup_secrets.merge_secrets(branch_to_merge=branch_to_merge) + + +def compile_api_requirements(): + """ + Compile API requirements using pip-tools. + + This function performs the following operations: + 1. Copies 'requirements.in' from local to the pod. + 2. Copies the compiled 'requirements.txt' back from the pod to local. + 3. Removes the temporary requirements files in the pod. + """ + directory = Path(__file__).parent / "src/core/api/" + in_path = directory / "requirements.in" + out_path = directory / "requirements.txt" + k8s_utils.set_context("kind-datacoves-cluster") + pod = k8s_utils.pod_for_deployment("core", core.DatacovesCoreK8sName.API.value) + run = k8s_utils.cmd_runner_in_pod( + "core", pod, container=core.DatacovesCoreK8sName.API.value + ) + try: + # Copy requirements.in to the pod (must be removed when finishing) + cmd.run(f"kubectl -n core cp {in_path.as_posix()} {pod}:/usr/src/app") + run("pip-compile -v -o requirements.txt") + # Copy compiled requirements.txt to host local machine. + cmd.run( + f"kubectl -n core cp {pod}:/usr/src/app/requirements.txt {out_path.as_posix()}" + ) + except Exception as e: + # This block will execute if there's an exception in the 'try' block. + print(f"Error while compiling: {e}") + finally: + # Delete temporary files in pod. + run("rm requirements.txt requirements.in") + + +def dumpdata(): + """Dump a Cluster's Django database to a desired destination.""" + print_logo() + cluster_domain = choose_cluster() + dump_database(cluster_domain) + + +def compile_docs(source, dest): + """Compile Datacoves docs into static files""" + from lib.doc_compiler import main + + main(source, dest) + + +if __name__ == "__main__": + program_name, *args = sys.argv + + # Run from the directory that contains this script. 
+ os.chdir(os.path.dirname(program_name)) + + try: + cmd.main() + except KeyboardInterrupt: + print("Ctrl-C, exit") diff --git a/config/datacoveslocal.com/base/calico-v3.25.yaml b/config/datacoveslocal.com/base/calico-v3.25.yaml new file mode 100644 index 00000000..dc379ace --- /dev/null +++ b/config/datacoveslocal.com/base/calico-v3.25.yaml @@ -0,0 +1,5257 @@ +# https://docs.projectcalico.org/v3.25/manifests/calico.yaml +--- +# Source: calico/templates/calico-kube-controllers.yaml +# This manifest creates a Pod Disruption Budget for Controller to allow K8s Cluster Autoscaler to evict + +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers +--- +# Source: calico/templates/calico-kube-controllers.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system +--- +# Source: calico/templates/calico-config.yaml +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # Typha is disabled. + typha_service_name: "none" + # Configure the backend to use. + calico_backend: "bird" + + # Configure the MTU to use for workload interfaces and tunnels. + # By default, MTU is auto-detected, and explicitly setting this field should not be required. + # You can override auto-detection by providing a non-zero value. + veth_mtu: "0" + + # The CNI network configuration to install on each node. The special + # values in this config will be automatically populated. + cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "calico", + "log_level": "info", + "log_file_path": "/var/log/calico/cni/cni.log", + "datastore_type": "kubernetes", + "nodename": "__KUBERNETES_NODE_NAME__", + "mtu": __CNI_MTU__, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + }, + { + "type": "bandwidth", + "capabilities": {"bandwidth": true} + } + ] + } +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgpconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPConfiguration + listKind: BGPConfigurationList + plural: bgpconfigurations + singular: bgpconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: BGPConfiguration contains the configuration for any BGP routing. + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: BGPConfigurationSpec contains the values of the BGP configuration. + properties: + asNumber: + description: + "ASNumber is the default AS number used by a node. [Default: + 64512]" + format: int32 + type: integer + bindMode: + description: + BindMode indicates whether to listen for BGP connections + on all addresses (None) or only on the node's canonical IP address + Node.Spec.BGP.IPvXAddress (NodeIP). Default behaviour is to listen + for BGP connections on all addresses. + type: string + communities: + description: + Communities is a list of BGP community values and their + arbitrary names for tagging routes. + items: + description: + Community contains standard or large community value + and its name. + properties: + name: + description: Name given to community value. + type: string + value: + description: + Value must be of format `aa:nn` or `aa:nn:mm`. + For standard community use `aa:nn` format, where `aa` and + `nn` are 16 bit number. For large community use `aa:nn:mm` + format, where `aa`, `nn` and `mm` are 32 bit number. Where, + `aa` is an AS Number, `nn` and `mm` are per-AS identifier. + pattern: ^(\d+):(\d+)$|^(\d+):(\d+):(\d+)$ + type: string + type: object + type: array + ignoredInterfaces: + description: + IgnoredInterfaces indicates the network interfaces that + needs to be excluded when reading device routes. + items: + type: string + type: array + listenPort: + description: + ListenPort is the port where BGP protocol should listen. + Defaults to 179 + maximum: 65535 + minimum: 1 + type: integer + logSeverityScreen: + description: + "LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: INFO]" + type: string + nodeMeshMaxRestartTime: + description: + Time to allow for software restart for node-to-mesh peerings. When + specified, this is configured as the graceful restart timeout. When + not specified, the BIRD default of 120s is used. This field can + only be set on the default BGPConfiguration instance and requires + that NodeMesh is enabled + type: string + nodeMeshPassword: + description: + Optional BGP password for full node-to-mesh peerings. + This field can only be set on the default BGPConfiguration instance + and requires that NodeMesh is enabled + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: + The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: + "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?" + type: string + optional: + description: + Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + nodeToNodeMeshEnabled: + description: + "NodeToNodeMeshEnabled sets whether full node to node + BGP mesh is enabled. [Default: true]" + type: boolean + prefixAdvertisements: + description: + PrefixAdvertisements contains per-prefix advertisement + configuration. + items: + description: + PrefixAdvertisement configures advertisement properties + for the specified CIDR. + properties: + cidr: + description: CIDR for which properties should be advertised. 
+ type: string + communities: + description: + Communities can be list of either community names + already defined in `Specs.Communities` or community value + of format `aa:nn` or `aa:nn:mm`. For standard community use + `aa:nn` format, where `aa` and `nn` are 16 bit number. For + large community use `aa:nn:mm` format, where `aa`, `nn` and + `mm` are 32 bit number. Where,`aa` is an AS Number, `nn` and + `mm` are per-AS identifier. + items: + type: string + type: array + type: object + type: array + serviceClusterIPs: + description: + ServiceClusterIPs are the CIDR blocks from which service + cluster IPs are allocated. If specified, Calico will advertise these + blocks, as well as any cluster IPs within them. + items: + description: + ServiceClusterIPBlock represents a single allowed ClusterIP + CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceExternalIPs: + description: + ServiceExternalIPs are the CIDR blocks for Kubernetes + Service External IPs. Kubernetes Service ExternalIPs will only be + advertised if they are within one of these blocks. + items: + description: + ServiceExternalIPBlock represents a single allowed + External IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + serviceLoadBalancerIPs: + description: + ServiceLoadBalancerIPs are the CIDR blocks for Kubernetes + Service LoadBalancer IPs. Kubernetes Service status.LoadBalancer.Ingress + IPs will only be advertised if they are within one of these blocks. + items: + description: + ServiceLoadBalancerIPBlock represents a single allowed + LoadBalancer IP CIDR block. + properties: + cidr: + type: string + type: object + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: bgppeers.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BGPPeer + listKind: BGPPeerList + plural: bgppeers + singular: bgppeer + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: BGPPeerSpec contains the specification for a BGPPeer resource. + properties: + asNumber: + description: The AS Number of the peer. + format: int32 + type: integer + keepOriginalNextHop: + description: + Option to keep the original nexthop field when routes + are sent to a BGP Peer. Setting "true" configures the selected BGP + Peers node to use the "next hop keep;" instead of "next hop self;"(default) + in the specific branch of the Node on "bird.cfg". + type: boolean + maxRestartTime: + description: + Time to allow for software restart. 
When specified, + this is configured as the graceful restart timeout. When not specified, + the BIRD default of 120s is used. + type: string + node: + description: + The node name identifying the Calico node instance that + is targeted by this peer. If this is not set, and no nodeSelector + is specified, then this BGP peer selects all nodes in the cluster. + type: string + nodeSelector: + description: + Selector for the nodes that should have this peering. When + this is set, the Node field must be empty. + type: string + numAllowedLocalASNumbers: + description: + Maximum number of local AS numbers that are allowed in + the AS path for received routes. This removes BGP loop prevention + and should only be used if absolutely necesssary. + format: int32 + type: integer + password: + description: + Optional BGP password for the peerings generated by this + BGPPeer resource. + properties: + secretKeyRef: + description: Selects a key of a secret in the node pod's namespace. + properties: + key: + description: + The key of the secret to select from. Must be + a valid secret key. + type: string + name: + description: + "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + TODO: Add other useful fields. apiVersion, kind, uid?" + type: string + optional: + description: + Specify whether the Secret or its key must be + defined + type: boolean + required: + - key + type: object + type: object + peerIP: + description: + The IP address of the peer followed by an optional port + number to peer with. If port number is given, format should be `[]:port` + or `:` for IPv4. If optional port number is not set, + and this peer IP and ASNumber belongs to a calico/node with ListenPort + set in BGPConfiguration, then we use that port to peer. + type: string + peerSelector: + description: + Selector for the remote nodes to peer with. When this + is set, the PeerIP and ASNumber fields must be empty. For each + peering between the local node and selected remote nodes, we configure + an IPv4 peering if both ends have NodeBGPSpec.IPv4Address specified, + and an IPv6 peering if both ends have NodeBGPSpec.IPv6Address specified. The + remote AS number comes from the remote node's NodeBGPSpec.ASNumber, + or the global default if that is not set. + type: string + reachableBy: + description: + Add an exact, i.e. /32, static route toward peer IP in + order to prevent route flapping. ReachableBy contains the address + of the gateway which peer can be reached by. + type: string + sourceAddress: + description: + Specifies whether and how to configure a source address + for the peerings generated by this BGPPeer resource. Default value + "UseNodeIP" means to configure the node IP as the source address. "None" + means not to configure a source address. + type: string + ttlSecurity: + description: + TTLSecurity enables the generalized TTL security mechanism + (GTSM) which protects against spoofed packets by ignoring received + packets with a smaller than expected TTL value. The provided value + is the number of hops (edges) between the peers. 
+ type: integer + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: blockaffinities.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: BlockAffinity + listKind: BlockAffinityList + plural: blockaffinities + singular: blockaffinity + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + BlockAffinitySpec contains the specification for a BlockAffinity + resource. + properties: + cidr: + type: string + deleted: + description: + Deleted indicates that this block affinity is being deleted. + This field is a string for compatibility with older releases that + mistakenly treat this field as a string. + type: string + node: + type: string + state: + type: string + required: + - cidr + - deleted + - node + - state + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: caliconodestatuses.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: CalicoNodeStatus + listKind: CalicoNodeStatusList + plural: caliconodestatuses + singular: caliconodestatus + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + CalicoNodeStatusSpec contains the specification for a CalicoNodeStatus + resource. + properties: + classes: + description: + Classes declares the types of information to monitor + for this calico/node, and allows for selective status reporting + about certain subsets of information. 
+ items: + type: string + type: array + node: + description: + The node name identifies the Calico node instance for + node status. + type: string + updatePeriodSeconds: + description: + UpdatePeriodSeconds is the period at which CalicoNodeStatus + should be updated. Set to 0 to disable CalicoNodeStatus refresh. + Maximum update period is one day. + format: int32 + type: integer + type: object + status: + description: + CalicoNodeStatusStatus defines the observed state of CalicoNodeStatus. + No validation needed for status since it is updated by Calico. + properties: + agent: + description: Agent holds agent status on the node. + properties: + birdV4: + description: BIRDV4 represents the latest observed status of bird4. + properties: + lastBootTime: + description: + LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: + LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + birdV6: + description: BIRDV6 represents the latest observed status of bird6. + properties: + lastBootTime: + description: + LastBootTime holds the value of lastBootTime + from bird.ctl output. + type: string + lastReconfigurationTime: + description: + LastReconfigurationTime holds the value of lastReconfigTime + from bird.ctl output. + type: string + routerID: + description: Router ID used by bird. + type: string + state: + description: The state of the BGP Daemon. + type: string + version: + description: Version of the BGP daemon + type: string + type: object + type: object + bgp: + description: BGP holds node BGP status. + properties: + numberEstablishedV4: + description: The total number of IPv4 established bgp sessions. + type: integer + numberEstablishedV6: + description: The total number of IPv6 established bgp sessions. + type: integer + numberNotEstablishedV4: + description: The total number of IPv4 non-established bgp sessions. + type: integer + numberNotEstablishedV6: + description: The total number of IPv6 non-established bgp sessions. + type: integer + peersV4: + description: PeersV4 represents IPv4 BGP peers status on the node. + items: + description: + CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: + IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: + Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. + type: string + type: object + type: array + peersV6: + description: PeersV6 represents IPv6 BGP peers status on the node. + items: + description: + CalicoNodePeer contains the status of BGP peers + on the node. + properties: + peerIP: + description: + IP address of the peer whose condition we are + reporting. + type: string + since: + description: Since the state or reason last changed. + type: string + state: + description: State is the BGP session state. + type: string + type: + description: + Type indicates whether this peer is configured + via the node-to-node mesh, or via en explicit global or + per-node BGPPeer object. 
+ type: string + type: object + type: array + required: + - numberEstablishedV4 + - numberEstablishedV6 + - numberNotEstablishedV4 + - numberNotEstablishedV6 + type: object + lastUpdated: + description: + LastUpdated is a timestamp representing the server time + when CalicoNodeStatus object last updated. It is represented in + RFC3339 form and is in UTC. + format: date-time + nullable: true + type: string + routes: + description: + Routes reports routes known to the Calico BGP daemon + on the node. + properties: + routesV4: + description: RoutesV4 represents IPv4 routes on the node. + items: + description: + CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: + LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: + If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: + Type of the source where a route is learned + from. + type: string + type: object + type: + description: + Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + routesV6: + description: RoutesV6 represents IPv6 routes on the node. + items: + description: + CalicoNodeRoute contains the status of BGP routes + on the node. + properties: + destination: + description: Destination of the route. + type: string + gateway: + description: Gateway for the destination. + type: string + interface: + description: Interface for the destination + type: string + learnedFrom: + description: + LearnedFrom contains information regarding + where this route originated. + properties: + peerIP: + description: + If sourceType is NodeMesh or BGPPeer, IP + address of the router that sent us this route. + type: string + sourceType: + description: + Type of the source where a route is learned + from. + type: string + type: object + type: + description: + Type indicates if the route is being used for + forwarding or not. + type: string + type: object + type: array + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterinformations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: ClusterInformation + listKind: ClusterInformationList + plural: clusterinformations + singular: clusterinformation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: ClusterInformation contains the cluster specific information. + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. 
In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + ClusterInformationSpec contains the values of describing + the cluster. + properties: + calicoVersion: + description: + CalicoVersion is the version of Calico that the cluster + is running + type: string + clusterGUID: + description: ClusterGUID is the GUID of the cluster + type: string + clusterType: + description: ClusterType describes the type of the cluster + type: string + datastoreReady: + description: + DatastoreReady is used during significant datastore migrations + to signal to components such as Felix that it should wait before + accessing the datastore. + type: boolean + variant: + description: Variant declares which variant of Calico should be active. + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: felixconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: FelixConfiguration + listKind: FelixConfigurationList + plural: felixconfigurations + singular: felixconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Felix Configuration contains the configuration for Felix. + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: FelixConfigurationSpec contains the values of the Felix configuration. + properties: + allowIPIPPacketsFromWorkloads: + description: + "AllowIPIPPacketsFromWorkloads controls whether Felix + will add a rule to drop IPIP encapsulated traffic from workloads + [Default: false]" + type: boolean + allowVXLANPacketsFromWorkloads: + description: + "AllowVXLANPacketsFromWorkloads controls whether Felix + will add a rule to drop VXLAN encapsulated traffic from workloads + [Default: false]" + type: boolean + awsSrcDstCheck: + description: + 'Set source-destination-check on AWS EC2 instances. Accepted + value must be one of "DoNothing", "Enable" or "Disable". [Default: + DoNothing]' + enum: + - DoNothing + - Enable + - Disable + type: string + bpfConnectTimeLoadBalancingEnabled: + description: + "BPFConnectTimeLoadBalancingEnabled when in BPF mode, + controls whether Felix installs the connection-time load balancer. The + connect-time load balancer is required for the host to be able to + reach Kubernetes services and it improves the performance of pod-to-service + connections. The only reason to disable it is for debugging purposes. 
[Default: + true]" + type: boolean + bpfDataIfacePattern: + description: + BPFDataIfacePattern is a regular expression that controls + which interfaces Felix should attach BPF programs to in order to + catch traffic to/from the network. This needs to match the interfaces + that Calico workload traffic flows over as well as any interfaces + that handle incoming traffic to nodeports and services from outside + the cluster. It should not match the workload interfaces (usually + named cali...). + type: string + bpfDisableUnprivileged: + description: + "BPFDisableUnprivileged, if enabled, Felix sets the kernel.unprivileged_bpf_disabled + sysctl to disable unprivileged use of BPF. This ensures that unprivileged + users cannot access Calico's BPF maps and cannot insert their own + BPF programs to interfere with Calico's. [Default: true]" + type: boolean + bpfEnabled: + description: + "BPFEnabled, if enabled Felix will use the BPF dataplane. + [Default: false]" + type: boolean + bpfEnforceRPF: + description: + "BPFEnforceRPF enforce strict RPF on all host interfaces + with BPF programs regardless of what is the per-interfaces or global + setting. Possible values are Disabled, Strict or Loose. [Default: + Strict]" + type: string + bpfExtToServiceConnmark: + description: + "BPFExtToServiceConnmark in BPF mode, control a 32bit + mark that is set on connections from an external client to a local + service. This mark allows us to control how packets of that connection + are routed within the host and how is routing interpreted by RPF + check. [Default: 0]" + type: integer + bpfExternalServiceMode: + description: + 'BPFExternalServiceMode in BPF mode, controls how connections + from outside the cluster to services (node ports and cluster IPs) + are forwarded to remote workloads. If set to "Tunnel" then both + request and response traffic is tunneled to the remote node. If + set to "DSR", the request traffic is tunneled but the response traffic + is sent directly from the remote node. In "DSR" mode, the remote + node appears to use the IP of the ingress node; this requires a + permissive L2 network. [Default: Tunnel]' + type: string + bpfHostConntrackBypass: + description: + "BPFHostConntrackBypass Controls whether to bypass Linux + conntrack in BPF mode for workloads and services. [Default: true + - bypass Linux conntrack]" + type: boolean + bpfKubeProxyEndpointSlicesEnabled: + description: + BPFKubeProxyEndpointSlicesEnabled in BPF mode, controls + whether Felix's embedded kube-proxy accepts EndpointSlices or not. + type: boolean + bpfKubeProxyIptablesCleanupEnabled: + description: + "BPFKubeProxyIptablesCleanupEnabled, if enabled in BPF + mode, Felix will proactively clean up the upstream Kubernetes kube-proxy's + iptables chains. Should only be enabled if kube-proxy is not running. [Default: + true]" + type: boolean + bpfKubeProxyMinSyncPeriod: + description: + "BPFKubeProxyMinSyncPeriod, in BPF mode, controls the + minimum time between updates to the dataplane for Felix's embedded + kube-proxy. Lower values give reduced set-up latency. Higher values + reduce Felix CPU usage by batching up more work. [Default: 1s]" + type: string + bpfL3IfacePattern: + description: + BPFL3IfacePattern is a regular expression that allows + to list tunnel devices like wireguard or vxlan (i.e., L3 devices) + in addition to BPFDataIfacePattern. 
That is, tunnel interfaces not + created by Calico, that Calico workload traffic flows over as well + as any interfaces that handle incoming traffic to nodeports and + services from outside the cluster. + type: string + bpfLogLevel: + description: + 'BPFLogLevel controls the log level of the BPF programs + when in BPF dataplane mode. One of "Off", "Info", or "Debug". The + logs are emitted to the BPF trace pipe, accessible with the command + `tc exec bpf debug`. [Default: Off].' + type: string + bpfMapSizeConntrack: + description: + "BPFMapSizeConntrack sets the size for the conntrack + map. This map must be large enough to hold an entry for each active + connection. Warning: changing the size of the conntrack map can + cause disruption." + type: integer + bpfMapSizeIPSets: + description: + BPFMapSizeIPSets sets the size for ipsets map. The IP + sets map must be large enough to hold an entry for each endpoint + matched by every selector in the source/destination matches in network + policy. Selectors such as "all()" can result in large numbers of + entries (one entry per endpoint in that case). + type: integer + bpfMapSizeIfState: + description: + BPFMapSizeIfState sets the size for ifstate map. The + ifstate map must be large enough to hold an entry for each device + (host + workloads) on a host. + type: integer + bpfMapSizeNATAffinity: + type: integer + bpfMapSizeNATBackend: + description: + BPFMapSizeNATBackend sets the size for nat back end map. + This is the total number of endpoints. This is mostly more than + the size of the number of services. + type: integer + bpfMapSizeNATFrontend: + description: + BPFMapSizeNATFrontend sets the size for nat front end + map. FrontendMap should be large enough to hold an entry for each + nodeport, external IP and each port in each service. + type: integer + bpfMapSizeRoute: + description: + BPFMapSizeRoute sets the size for the routes map. The + routes map should be large enough to hold one entry per workload + and a handful of entries per host (enough to cover its own IPs and + tunnel IPs). + type: integer + bpfPSNATPorts: + anyOf: + - type: integer + - type: string + description: + "BPFPSNATPorts sets the range from which we randomly + pick a port if there is a source port collision. This should be + within the ephemeral range as defined by RFC 6056 (1024–65535) and + preferably outside the ephemeral ranges used by common operating + systems. Linux uses 32768–60999, while others mostly use the IANA + defined range 49152–65535. It is not necessarily a problem if this + range overlaps with the operating systems. Both ends of the range + are inclusive. [Default: 20000:29999]" + pattern: ^.* + x-kubernetes-int-or-string: true + bpfPolicyDebugEnabled: + description: + BPFPolicyDebugEnabled when true, Felix records detailed + information about the BPF policy programs, which can be examined + with the calico-bpf command-line tool. + type: boolean + chainInsertMode: + description: + "ChainInsertMode controls whether Felix hooks the kernel's + top-level iptables chains by inserting a rule at the top of the + chain or by appending a rule at the bottom. insert is the safe default + since it prevents Calico's rules from being bypassed. If you switch + to append mode, be sure that the other rules in the chains signal + acceptance by falling through to the Calico rules, otherwise the + Calico policy will be bypassed. [Default: insert]" + type: string + dataplaneDriver: + description: + DataplaneDriver filename of the external dataplane driver + to use. 
Only used if UseInternalDataplaneDriver is set to false. + type: string + dataplaneWatchdogTimeout: + description: + "DataplaneWatchdogTimeout is the readiness/liveness timeout + used for Felix's (internal) dataplane driver. Increase this value + if you experience spurious non-ready or non-live events when Felix + is under heavy load. Decrease the value to get felix to report non-live + or non-ready more quickly. [Default: 90s] \n Deprecated: replaced + by the generic HealthTimeoutOverrides." + type: string + debugDisableLogDropping: + type: boolean + debugMemoryProfilePath: + type: string + debugSimulateCalcGraphHangAfter: + type: string + debugSimulateDataplaneHangAfter: + type: string + defaultEndpointToHostAction: + description: + 'DefaultEndpointToHostAction controls what happens to + traffic that goes from a workload endpoint to the host itself (after + the traffic hits the endpoint egress policy). By default Calico + blocks traffic from workload endpoints to the host itself with an + iptables "DROP" action. If you want to allow some or all traffic + from endpoint to host, set this parameter to RETURN or ACCEPT. Use + RETURN if you have your own rules in the iptables "INPUT" chain; + Calico will insert its rules at the top of that chain, then "RETURN" + packets to the "INPUT" chain once it has completed processing workload + endpoint egress policy. Use ACCEPT to unconditionally accept packets + from workloads after processing workload endpoint egress policy. + [Default: Drop]' + type: string + deviceRouteProtocol: + description: + This defines the route protocol added to programmed device + routes, by default this will be RTPROT_BOOT when left blank. + type: integer + deviceRouteSourceAddress: + description: + This is the IPv4 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + deviceRouteSourceAddressIPv6: + description: + This is the IPv6 source address to use on programmed + device routes. By default the source address is left blank, leaving + the kernel to choose the source address used. + type: string + disableConntrackInvalidCheck: + type: boolean + endpointReportingDelay: + type: string + endpointReportingEnabled: + type: boolean + externalNodesList: + description: + ExternalNodesCIDRList is a list of CIDR's of external-non-calico-nodes + which may source tunnel traffic and have the tunneled traffic be + accepted at calico nodes. + items: + type: string + type: array + failsafeInboundHostPorts: + description: + 'FailsafeInboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow incoming traffic to host endpoints + on irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all inbound host ports, use the value + none. The default value allows ssh access and DHCP. [Default: tcp:22, + udp:68, tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, tcp:6667]' + items: + description: + ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. 
+ properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + failsafeOutboundHostPorts: + description: + 'FailsafeOutboundHostPorts is a list of UDP/TCP ports + and CIDRs that Felix will allow outgoing traffic from host endpoints + to irrespective of the security policy. This is useful to avoid + accidentally cutting off a host with incorrect configuration. For + back-compatibility, if the protocol is not specified, it defaults + to "tcp". If a CIDR is not specified, it will allow traffic from + all addresses. To disable all outbound host ports, use the value + none. The default value opens etcd''s standard ports to ensure that + Felix does not get cut off from etcd as well as allowing DHCP and + DNS. [Default: tcp:179, tcp:2379, tcp:2380, tcp:6443, tcp:6666, + tcp:6667, udp:53, udp:67]' + items: + description: + ProtoPort is combination of protocol, port, and CIDR. + Protocol and port must be specified. + properties: + net: + type: string + port: + type: integer + protocol: + type: string + required: + - port + - protocol + type: object + type: array + featureDetectOverride: + description: + FeatureDetectOverride is used to override feature detection + based on auto-detected platform capabilities. Values are specified + in a comma separated list with no spaces, example; "SNATFullyRandom=true,MASQFullyRandom=false,RestoreSupportsLock=". "true" + or "false" will force the feature, empty or omitted values are auto-detected. + type: string + featureGates: + description: + FeatureGates is used to enable or disable tech-preview + Calico features. Values are specified in a comma separated list + with no spaces, example; "BPFConnectTimeLoadBalancingWorkaround=enabled,XyZ=false". + This is used to enable features that are not fully production ready. + type: string + floatingIPs: + description: + FloatingIPs configures whether or not Felix will program + non-OpenStack floating IP addresses. (OpenStack-derived floating + IPs are always programmed, regardless of this setting.) + enum: + - Enabled + - Disabled + type: string + genericXDPEnabled: + description: + "GenericXDPEnabled enables Generic XDP so network cards + that don't support XDP offload or driver modes can use XDP. This + is not recommended since it doesn't provide better performance + than iptables. [Default: false]" + type: boolean + healthEnabled: + type: boolean + healthHost: + type: string + healthPort: + type: integer + healthTimeoutOverrides: + description: + HealthTimeoutOverrides allows the internal watchdog timeouts + of individual subcomponents to be overriden. This is useful for + working around "false positive" liveness timeouts that can occur + in particularly stressful workloads or if CPU is constrained. For + a list of active subcomponents, see Felix's logs. + items: + properties: + name: + type: string + timeout: + type: string + required: + - name + - timeout + type: object + type: array + interfaceExclude: + description: + "InterfaceExclude is a comma-separated list of interfaces + that Felix should exclude when monitoring for host endpoints. The + default value ensures that Felix ignores Kubernetes' IPVS dummy + interface, which is used internally by kube-proxy. If you want to + exclude multiple interface names using a single value, the list + supports regular expressions. For regular expressions you must wrap + the value with '/'. 
For example having values '/^kube/,veth1' + will exclude all interfaces that begin with 'kube' and also the + interface 'veth1'. [Default: kube-ipvs0]" + type: string + interfacePrefix: + description: + "InterfacePrefix is the interface name prefix that identifies + workload endpoints and so distinguishes them from host endpoint + interfaces. Note: in environments other than bare metal, the orchestrators + configure this appropriately. For example our Kubernetes and Docker + integrations set the 'cali' value, and our OpenStack integration + sets the 'tap' value. [Default: cali]" + type: string + interfaceRefreshInterval: + description: + InterfaceRefreshInterval is the period at which Felix + rescans local interfaces to verify their state. The rescan can be + disabled by setting the interval to 0. + type: string + ipipEnabled: + description: + "IPIPEnabled overrides whether Felix should configure + an IPIP interface on the host. Optional as Felix determines this + based on the existing IP pools. [Default: nil (unset)]" + type: boolean + ipipMTU: + description: + "IPIPMTU is the MTU to set on the tunnel device. See + Configuring MTU [Default: 1440]" + type: integer + ipsetsRefreshInterval: + description: + "IpsetsRefreshInterval is the period at which Felix re-checks + all iptables state to ensure that no other process has accidentally + broken Calico's rules. Set to 0 to disable iptables refresh. [Default: + 90s]" + type: string + iptablesBackend: + description: + IptablesBackend specifies which backend of iptables will + be used. The default is Auto. + type: string + iptablesFilterAllowAction: + type: string + iptablesLockFilePath: + description: + "IptablesLockFilePath is the location of the iptables + lock file. You may need to change this if the lock file is not in + its standard location (for example if you have mapped it into Felix's + container at a different path). [Default: /run/xtables.lock]" + type: string + iptablesLockProbeInterval: + description: + "IptablesLockProbeInterval is the time that Felix will + wait between attempts to acquire the iptables lock if it is not + available. Lower values make Felix more responsive when the lock + is contended, but use more CPU. [Default: 50ms]" + type: string + iptablesLockTimeout: + description: + "IptablesLockTimeout is the time that Felix will wait + for the iptables lock, or 0, to disable. To use this feature, Felix + must share the iptables lock file with all other processes that + also take the lock. When running Felix inside a container, this + requires the /run directory of the host to be mounted into the calico/node + or calico/felix container. [Default: 0s disabled]" + type: string + iptablesMangleAllowAction: + type: string + iptablesMarkMask: + description: + "IptablesMarkMask is the mask that Felix selects its + IPTables Mark bits from. Should be a 32 bit hexadecimal number with + at least 8 bits set, none of which clash with any other mark bits + in use on the system. [Default: 0xff000000]" + format: int32 + type: integer + iptablesNATOutgoingInterfaceFilter: + type: string + iptablesPostWriteCheckInterval: + description: + "IptablesPostWriteCheckInterval is the period after Felix + has done a write to the dataplane that it schedules an extra read + back in order to check the write was not clobbered by another process. + This should only occur if another application on the system doesn't + respect the iptables lock. 
[Default: 1s]" + type: string + iptablesRefreshInterval: + description: + "IptablesRefreshInterval is the period at which Felix + re-checks the IP sets in the dataplane to ensure that no other process + has accidentally broken Calico's rules. Set to 0 to disable IP + sets refresh. Note: the default for this value is lower than the + other refresh intervals as a workaround for a Linux kernel bug that + was fixed in kernel version 4.11. If you are using v4.11 or greater + you may want to set this to, a higher value to reduce Felix CPU + usage. [Default: 10s]" + type: string + ipv6Support: + description: + IPv6Support controls whether Felix enables support for + IPv6 (if supported by the in-use dataplane). + type: boolean + kubeNodePortRanges: + description: + "KubeNodePortRanges holds list of port ranges used for + service node ports. Only used if felix detects kube-proxy running + in ipvs mode. Felix uses these ranges to separate host and workload + traffic. [Default: 30000:32767]." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + logDebugFilenameRegex: + description: + LogDebugFilenameRegex controls which source code files + have their Debug log output included in the logs. Only logs from + files with names that match the given regular expression are included. The + filter only applies to Debug level logs. + type: string + logFilePath: + description: + "LogFilePath is the full path to the Felix log. Set to + none to disable file logging. [Default: /var/log/calico/felix.log]" + type: string + logPrefix: + description: + "LogPrefix is the log prefix that Felix uses when rendering + LOG rules. [Default: calico-packet]" + type: string + logSeverityFile: + description: + "LogSeverityFile is the log severity above which logs + are sent to the log file. [Default: Info]" + type: string + logSeverityScreen: + description: + "LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]" + type: string + logSeveritySys: + description: + "LogSeveritySys is the log severity above which logs + are sent to the syslog. Set to None for no logging to syslog. [Default: + Info]" + type: string + maxIpsetSize: + type: integer + metadataAddr: + description: + "MetadataAddr is the IP address or domain name of the + server that can answer VM queries for cloud-init metadata. In OpenStack, + this corresponds to the machine running nova-api (or in Ubuntu, + nova-api-metadata). A value of none (case insensitive) means that + Felix should not set up any NAT rule for the metadata path. [Default: + 127.0.0.1]" + type: string + metadataPort: + description: + "MetadataPort is the port of the metadata server. This, + combined with global.MetadataAddr (if not 'None'), is used to + set up a NAT rule, from 169.254.169.254:80 to MetadataAddr:MetadataPort. + In most cases this should not need to be changed [Default: 8775]." + type: integer + mtuIfacePattern: + description: + MTUIfacePattern is a regular expression that controls + which interfaces Felix should scan in order to calculate the host's + MTU. This should not match workload interfaces (usually named cali...). + type: string + natOutgoingAddress: + description: + NATOutgoingAddress specifies an address to use when performing + source NAT for traffic in a natOutgoing pool that is leaving the + network. 
By default the address used is an address on the interface + the traffic is leaving on (ie it uses the iptables MASQUERADE target) + type: string + natPortRange: + anyOf: + - type: integer + - type: string + description: + NATPortRange specifies the range of ports that is used + for port mapping when doing outgoing NAT. When unset the default + behavior of the network stack is used. + pattern: ^.* + x-kubernetes-int-or-string: true + netlinkTimeout: + type: string + openstackRegion: + description: + "OpenstackRegion is the name of the region that a particular + Felix belongs to. In a multi-region Calico/OpenStack deployment, + this must be configured somehow for each Felix (here in the datamodel, + or in felix.cfg or the environment on each compute node), and must + match the [calico] openstack_region value configured in neutron.conf + on each node. [Default: Empty]" + type: string + policySyncPathPrefix: + description: + "PolicySyncPathPrefix is used to by Felix to communicate + policy changes to external services, like Application layer policy. + [Default: Empty]" + type: string + prometheusGoMetricsEnabled: + description: + "PrometheusGoMetricsEnabled disables Go runtime metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]" + type: boolean + prometheusMetricsEnabled: + description: + "PrometheusMetricsEnabled enables the Prometheus metrics + server in Felix if set to true. [Default: false]" + type: boolean + prometheusMetricsHost: + description: + "PrometheusMetricsHost is the host that the Prometheus + metrics server should bind to. [Default: empty]" + type: string + prometheusMetricsPort: + description: + "PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. [Default: 9091]" + type: integer + prometheusProcessMetricsEnabled: + description: + "PrometheusProcessMetricsEnabled disables process metrics + collection, which the Prometheus client does by default, when set + to false. This reduces the number of metrics reported, reducing + Prometheus load. [Default: true]" + type: boolean + prometheusWireGuardMetricsEnabled: + description: + "PrometheusWireGuardMetricsEnabled disables wireguard + metrics collection, which the Prometheus client does by default, + when set to false. This reduces the number of metrics reported, + reducing Prometheus load. [Default: true]" + type: boolean + removeExternalRoutes: + description: + Whether or not to remove device routes that have not + been programmed by Felix. Disabling this will allow external applications + to also add device routes. This is enabled by default which means + we will remove externally added routes. + type: boolean + reportingInterval: + description: + "ReportingInterval is the interval at which Felix reports + its status into the datastore or 0 to disable. Must be non-zero + in OpenStack deployments. [Default: 30s]" + type: string + reportingTTL: + description: + "ReportingTTL is the time-to-live setting for process-wide + status reports. [Default: 90s]" + type: string + routeRefreshInterval: + description: + "RouteRefreshInterval is the period at which Felix re-checks + the routes in the dataplane to ensure that no other process has + accidentally broken Calico's rules. Set to 0 to disable route refresh. + [Default: 90s]" + type: string + routeSource: + description: + "RouteSource configures where Felix gets its routing + information. 
- WorkloadIPs: use workload endpoints to construct + routes. - CalicoIPAM: the default - use IPAM data to construct routes." + type: string + routeSyncDisabled: + description: + RouteSyncDisabled will disable all operations performed + on the route table. Set to true to run in network-policy mode only. + type: boolean + routeTableRange: + description: + Deprecated in favor of RouteTableRanges. Calico programs + additional Linux route tables for various purposes. RouteTableRange + specifies the indices of the route tables that Calico should use. + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + routeTableRanges: + description: + Calico programs additional Linux route tables for various + purposes. RouteTableRanges specifies a set of table index ranges + that Calico should use. Deprecates`RouteTableRange`, overrides `RouteTableRange`. + items: + properties: + max: + type: integer + min: + type: integer + required: + - max + - min + type: object + type: array + serviceLoopPrevention: + description: + 'When service IP advertisement is enabled, prevent routing + loops to service IPs that are not in use, by dropping or rejecting + packets that do not get DNAT''d by kube-proxy. Unless set to "Disabled", + in which case such routing loops continue to be allowed. [Default: + Drop]' + type: string + sidecarAccelerationEnabled: + description: + "SidecarAccelerationEnabled enables experimental sidecar + acceleration [Default: false]" + type: boolean + usageReportingEnabled: + description: + "UsageReportingEnabled reports anonymous Calico version + number and cluster size to projectcalico.org. Logs warnings returned + by the usage server. For example, if a significant security vulnerability + has been discovered in the version of Calico being used. [Default: + true]" + type: boolean + usageReportingInitialDelay: + description: + "UsageReportingInitialDelay controls the minimum delay + before Felix makes a report. [Default: 300s]" + type: string + usageReportingInterval: + description: + "UsageReportingInterval controls the interval at which + Felix makes reports. [Default: 86400s]" + type: string + useInternalDataplaneDriver: + description: + UseInternalDataplaneDriver, if true, Felix will use its + internal dataplane programming logic. If false, it will launch + an external dataplane driver and communicate with it over protobuf. + type: boolean + vxlanEnabled: + description: + "VXLANEnabled overrides whether Felix should create the + VXLAN tunnel device for IPv4 VXLAN networking. Optional as Felix + determines this based on the existing IP pools. [Default: nil (unset)]" + type: boolean + vxlanMTU: + description: + "VXLANMTU is the MTU to set on the IPv4 VXLAN tunnel + device. See Configuring MTU [Default: 1410]" + type: integer + vxlanMTUV6: + description: + "VXLANMTUV6 is the MTU to set on the IPv6 VXLAN tunnel + device. See Configuring MTU [Default: 1390]" + type: integer + vxlanPort: + type: integer + vxlanVNI: + type: integer + wireguardEnabled: + description: + "WireguardEnabled controls whether Wireguard is enabled + for IPv4 (encapsulating IPv4 traffic over an IPv4 underlay network). + [Default: false]" + type: boolean + wireguardEnabledV6: + description: + "WireguardEnabledV6 controls whether Wireguard is enabled + for IPv6 (encapsulating IPv6 traffic over an IPv6 underlay network). 
+ [Default: false]" + type: boolean + wireguardHostEncryptionEnabled: + description: + "WireguardHostEncryptionEnabled controls whether Wireguard + host-to-host encryption is enabled. [Default: false]" + type: boolean + wireguardInterfaceName: + description: + "WireguardInterfaceName specifies the name to use for + the IPv4 Wireguard interface. [Default: wireguard.cali]" + type: string + wireguardInterfaceNameV6: + description: + "WireguardInterfaceNameV6 specifies the name to use for + the IPv6 Wireguard interface. [Default: wg-v6.cali]" + type: string + wireguardKeepAlive: + description: + "WireguardKeepAlive controls Wireguard PersistentKeepalive + option. Set 0 to disable. [Default: 0]" + type: string + wireguardListeningPort: + description: + "WireguardListeningPort controls the listening port used + by IPv4 Wireguard. [Default: 51820]" + type: integer + wireguardListeningPortV6: + description: + "WireguardListeningPortV6 controls the listening port + used by IPv6 Wireguard. [Default: 51821]" + type: integer + wireguardMTU: + description: + "WireguardMTU controls the MTU on the IPv4 Wireguard + interface. See Configuring MTU [Default: 1440]" + type: integer + wireguardMTUV6: + description: + "WireguardMTUV6 controls the MTU on the IPv6 Wireguard + interface. See Configuring MTU [Default: 1420]" + type: integer + wireguardRoutingRulePriority: + description: + "WireguardRoutingRulePriority controls the priority value + to use for the Wireguard routing rule. [Default: 99]" + type: integer + workloadSourceSpoofing: + description: + WorkloadSourceSpoofing controls whether pods can use + the allowedSourcePrefixes annotation to send traffic with a source + IP address that is not theirs. This is disabled by default. When + set to "Any", pods can request any prefix. + type: string + xdpEnabled: + description: + "XDPEnabled enables XDP acceleration for suitable untracked + incoming deny rules. [Default: true]" + type: boolean + xdpRefreshInterval: + description: + "XDPRefreshInterval is the period at which Felix re-checks + all XDP state to ensure that no other process has accidentally broken + Calico's BPF maps or attached programs. Set to 0 to disable XDP + refresh. [Default: 90s]" + type: string + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkPolicy + listKind: GlobalNetworkPolicyList + plural: globalnetworkpolicies + singular: globalnetworkpolicy + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + properties: + applyOnForward: + description: + ApplyOnForward indicates to apply the rules in this policy + on forward traffic. + type: boolean + doNotTrack: + description: + DoNotTrack indicates whether packets matched by the rules + in this policy should go through the data plane's connection tracking, + such as Linux conntrack. If True, the rules in this policy are + applied before any data plane connection tracking, and packets allowed + by this policy are marked as not to be tracked. + type: boolean + egress: + description: + The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: + "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: + Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: + "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: + Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: + NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: + NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: + NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: + "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. 
\n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: + "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: + ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: + Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: + Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: + "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: + Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: + Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: + HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: + Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: + "Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR'd together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it." 
+ items: + description: + "HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix" + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: + ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: + Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: + Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: + IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: + Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: + Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: + Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: + Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: + NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: + "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: + Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: + "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
+ type: string + nets: + description: + Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: + NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: + NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: + NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: + "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: + "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: + ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: + Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: + Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: + "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: + Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: + Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: + The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: + "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: + Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: + "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: + Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: + NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: + NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: + NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: + "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. 
\n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: + "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: + ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: + Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: + Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: + "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: + Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: + Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: + HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: + Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: + "Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR'd together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it." 
+ items: + description: + "HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix" + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: + ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: + Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: + Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: + IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: + Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: + Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: + Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: + Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: + NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: + "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: + Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: + "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." 
+ type: string + nets: + description: + Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: + NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: + NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: + NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: + "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: + "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: + ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: + Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: + Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: + "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. 
\n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: + Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: + Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + namespaceSelector: + description: + NamespaceSelector is an optional field for an expression + used to select a pod based on namespaces. + type: string + order: + description: + Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + preDNAT: + description: + PreDNAT indicates to apply the rules in this policy before + any DNAT. + type: boolean + selector: + description: + "The selector is an expression used to pick pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: + ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: + "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy. The + default is: \n - [ PolicyTypeIngress ], if there are no Egress rules + (including the case where there are also no Ingress rules) \n + - [ PolicyTypeEgress ], if there are Egress rules but no Ingress + rules \n - [ PolicyTypeIngress, PolicyTypeEgress ], if there are + both Ingress and Egress rules. \n When the policy is read back again, + Types will always be one of these values, never empty or nil." + items: + description: + PolicyType enumerates the possible values of the PolicySpec + Types field. 
+ type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: globalnetworksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: GlobalNetworkSet + listKind: GlobalNetworkSetList + plural: globalnetworksets + singular: globalnetworkset + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + description: + GlobalNetworkSet contains a set of arbitrary IP sub-networks/CIDRs + that share labels to allow rules to refer to them via selectors. The labels + of GlobalNetworkSet are not namespaced. + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + GlobalNetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: hostendpoints.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: HostEndpoint + listKind: HostEndpointList + plural: hostendpoints + singular: hostendpoint + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + HostEndpointSpec contains the specification for a HostEndpoint + resource. + properties: + expectedIPs: + description: + "The expected IP addresses (IPv4 and IPv6) of the endpoint. + If \"InterfaceName\" is not present, Calico will look for an interface + matching any of the IPs in the list and apply policy to that. 
Note: + \tWhen using the selector match criteria in an ingress or egress + security Policy \tor Profile, Calico converts the selector into + a set of IP addresses. For host \tendpoints, the ExpectedIPs field + is used for that purpose. (If only the interface \tname is specified, + Calico does not learn the IPs of the interface for use in match + \tcriteria.)" + items: + type: string + type: array + interfaceName: + description: + "Either \"*\", or the name of a specific Linux interface + to apply policy to; or empty. \"*\" indicates that this HostEndpoint + governs all traffic to, from or through the default network namespace + of the host named by the \"Node\" field; entering and leaving that + namespace via any interface, including those from/to non-host-networked + local workloads. \n If InterfaceName is not \"*\", this HostEndpoint + only governs traffic that enters or leaves the host through the + specific interface named by InterfaceName, or - when InterfaceName + is empty - through the specific interface that has one of the IPs + in ExpectedIPs. Therefore, when InterfaceName is empty, at least + one expected IP must be specified. Only external interfaces (such + as \"eth0\") are supported here; it isn't possible for a HostEndpoint + to protect traffic through a specific local workload interface. + \n Note: Only some kinds of policy are implemented for \"*\" HostEndpoints; + initially just pre-DNAT policy. Please check Calico documentation + for the latest position." + type: string + node: + description: The node name identifying the Calico node instance. + type: string + ports: + description: + Ports contains the endpoint's named ports, which may + be referenced in security policy rules. + items: + properties: + name: + type: string + port: + type: integer + protocol: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + required: + - name + - port + - protocol + type: object + type: array + profiles: + description: + A list of identifiers of security Profile objects that + apply to this endpoint. Each profile is applied in the order that + they appear in this list. Profile rules are applied after the selector-based + security policy. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamblocks.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMBlock + listKind: IPAMBlockList + plural: ipamblocks + singular: ipamblock + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + IPAMBlockSpec contains the specification for an IPAMBlock + resource. + properties: + affinity: + description: + Affinity of the block, if this block has one. If set, + it will be of the form "host:". If not set, this block + is not affine to a host. + type: string + allocations: + description: + Array of allocations in-use within this block. nil entries + mean the allocation is free. For non-nil entries at index i, the + index is the ordinal of the allocation within this block and the + value is the index of the associated attributes in the Attributes + array. + items: + type: integer + # TODO: This nullable is manually added in. We should update controller-gen + # to handle []*int properly itself. + nullable: true + type: array + attributes: + description: + Attributes is an array of arbitrary metadata associated + with allocations in the block. To find attributes for a given allocation, + use the value of the allocation's entry in the Allocations array + as the index of the element in this array. + items: + properties: + handle_id: + type: string + secondary: + additionalProperties: + type: string + type: object + type: object + type: array + cidr: + description: The block's CIDR. + type: string + deleted: + description: + Deleted is an internal boolean used to workaround a limitation + in the Kubernetes API whereby deletion will not return a conflict + error if the block has been updated. It should not be set manually. + type: boolean + sequenceNumber: + default: 0 + description: + We store a sequence number that is updated each time + the block is written. Each allocation will also store the sequence + number of the block at the time of its creation. When releasing + an IP, passing the sequence number associated with the allocation + allows us to protect against a race condition and ensure the IP + hasn't been released and re-allocated since the release request. + format: int64 + type: integer + sequenceNumberForAllocation: + additionalProperties: + format: int64 + type: integer + description: + Map of allocated ordinal within the block to sequence + number of the block at the time of allocation. Kubernetes does not + allow numerical keys for maps, so the key is cast to a string. + type: object + strictAffinity: + description: + StrictAffinity on the IPAMBlock is deprecated and no + longer used by the code. Use IPAMConfig StrictAffinity instead. + type: boolean + unallocated: + description: + Unallocated is an ordered list of allocations which are + free in the block. + items: + type: integer + type: array + required: + - allocations + - attributes + - cidr + - strictAffinity + - unallocated + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamconfigs.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMConfig + listKind: IPAMConfigList + plural: ipamconfigs + singular: ipamconfig + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + IPAMConfigSpec contains the specification for an IPAMConfig + resource. + properties: + autoAllocateBlocks: + type: boolean + maxBlocksPerHost: + description: + MaxBlocksPerHost, if non-zero, is the max number of blocks + that can be affine to each host. + maximum: 2147483647 + minimum: 0 + type: integer + strictAffinity: + type: boolean + required: + - autoAllocateBlocks + - strictAffinity + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ipamhandles.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPAMHandle + listKind: IPAMHandleList + plural: ipamhandles + singular: ipamhandle + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + IPAMHandleSpec contains the specification for an IPAMHandle + resource. + properties: + block: + additionalProperties: + type: integer + type: object + deleted: + type: boolean + handleID: + type: string + required: + - block + - handleID + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: ippools.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPPool + listKind: IPPoolList + plural: ippools + singular: ippool + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: IPPoolSpec contains the specification for an IPPool resource. + properties: + allowedUses: + description: + AllowedUse controls what the IP pool will be used for. If + not specified or empty, defaults to ["Tunnel", "Workload"] for back-compatibility + items: + type: string + type: array + blockSize: + description: + The block size to use for IP address assignments from + this pool. Defaults to 26 for IPv4 and 122 for IPv6. + type: integer + cidr: + description: The pool CIDR. + type: string + disableBGPExport: + description: + "Disable exporting routes from this IP Pool's CIDR over + BGP. [Default: false]" + type: boolean + disabled: + description: + When disabled is true, Calico IPAM will not assign addresses + from this pool. + type: boolean + ipip: + description: + "Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only." + properties: + enabled: + description: + When enabled is true, ipip tunneling will be used + to deliver packets to destinations within this pool. + type: boolean + mode: + description: + The IPIP mode. This can be one of "always" or "cross-subnet". A + mode of "always" will also use IPIP tunneling for routing to + destination IP addresses within this pool. A mode of "cross-subnet" + will only use IPIP tunneling when the destination node is on + a different subnet to the originating node. The default value + (if not specified) is "always". + type: string + type: object + ipipMode: + description: + Contains configuration for IPIP tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. IPIP tunneling + is disabled). + type: string + nat-outgoing: + description: + "Deprecated: this field is only used for APIv1 backwards + compatibility. Setting this field is not allowed, this field is + for internal use only." + type: boolean + natOutgoing: + description: + When natOutgoing is true, packets sent from Calico networked + containers in this pool to destinations outside of this pool will + be masqueraded. + type: boolean + nodeSelector: + description: + Allows IPPool to allocate for a specific node by label + selector. + type: string + vxlanMode: + description: + Contains configuration for VXLAN tunneling for this pool. + If not specified, then this is defaulted to "Never" (i.e. VXLAN + tunneling is disabled). 
+ type: string + required: + - cidr + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: (devel) + creationTimestamp: null + name: ipreservations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: IPReservation + listKind: IPReservationList + plural: ipreservations + singular: ipreservation + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + IPReservationSpec contains the specification for an IPReservation + resource. + properties: + reservedCIDRs: + description: + ReservedCIDRs is a list of CIDRs and/or IP addresses + that Calico IPAM will exclude from new allocations. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: kubecontrollersconfigurations.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: KubeControllersConfiguration + listKind: KubeControllersConfigurationList + plural: kubecontrollersconfigurations + singular: kubecontrollersconfiguration + preserveUnknownFields: false + scope: Cluster + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + KubeControllersConfigurationSpec contains the values of the + Kubernetes controllers configuration. + properties: + controllers: + description: + Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: + Namespace enables and configures the namespace controller. + Enabled by default, set to nil to disable. 
+ properties: + reconcilerPeriod: + description: + "ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]" + type: string + type: object + node: + description: + Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: + HostEndpoint controls syncing nodes to host endpoints. + Disabled by default, set to nil to disable. + properties: + autoCreate: + description: + "AutoCreate enables automatic creation of + host endpoints for every node. [Default: Disabled]" + type: string + type: object + leakGracePeriod: + description: + "LeakGracePeriod is the period used by the controller + to determine if an IP address has been leaked. Set to 0 + to disable IP garbage collection. [Default: 15m]" + type: string + reconcilerPeriod: + description: + "ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]" + type: string + syncLabels: + description: + "SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]" + type: string + type: object + policy: + description: + Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: + "ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]" + type: string + type: object + serviceAccount: + description: + ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: + "ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]" + type: string + type: object + workloadEndpoint: + description: + WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: + "ReconcilerPeriod is the period to perform reconciliation + with the Calico datastore. [Default: 5m]" + type: string + type: object + type: object + debugProfilePort: + description: + DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: + "EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]" + type: string + healthChecks: + description: + "HealthChecks enables or disables support for health + checks [Default: Enabled]" + type: string + logSeverityScreen: + description: + "LogSeverityScreen is the log severity above which logs + are sent to the stdout. [Default: Info]" + type: string + prometheusMetricsPort: + description: + "PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. [Default: 9094]" + type: integer + required: + - controllers + type: object + status: + description: + KubeControllersConfigurationStatus represents the status + of the configuration. It's useful for admins to be able to see the actual + config that was applied, which can be modified by environment variables + on the kube-controllers process. + properties: + environmentVars: + additionalProperties: + type: string + description: + EnvironmentVars contains the environment variables on + the kube-controllers that influenced the RunningConfig. 
+ type: object + runningConfig: + description: + RunningConfig contains the effective config that is running + in the kube-controllers pod, after merging the API resource with + any environment variables. + properties: + controllers: + description: + Controllers enables and configures individual Kubernetes + controllers + properties: + namespace: + description: + Namespace enables and configures the namespace + controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: + "ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]" + type: string + type: object + node: + description: + Node enables and configures the node controller. + Enabled by default, set to nil to disable. + properties: + hostEndpoint: + description: + HostEndpoint controls syncing nodes to host + endpoints. Disabled by default, set to nil to disable. + properties: + autoCreate: + description: + "AutoCreate enables automatic creation + of host endpoints for every node. [Default: Disabled]" + type: string + type: object + leakGracePeriod: + description: + "LeakGracePeriod is the period used by the + controller to determine if an IP address has been leaked. + Set to 0 to disable IP garbage collection. [Default: + 15m]" + type: string + reconcilerPeriod: + description: + "ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]" + type: string + syncLabels: + description: + "SyncLabels controls whether to copy Kubernetes + node labels to Calico nodes. [Default: Enabled]" + type: string + type: object + policy: + description: + Policy enables and configures the policy controller. + Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: + "ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]" + type: string + type: object + serviceAccount: + description: + ServiceAccount enables and configures the service + account controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: + "ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]" + type: string + type: object + workloadEndpoint: + description: + WorkloadEndpoint enables and configures the workload + endpoint controller. Enabled by default, set to nil to disable. + properties: + reconcilerPeriod: + description: + "ReconcilerPeriod is the period to perform + reconciliation with the Calico datastore. [Default: + 5m]" + type: string + type: object + type: object + debugProfilePort: + description: + DebugProfilePort configures the port to serve memory + and cpu profiles on. If not specified, profiling is disabled. + format: int32 + type: integer + etcdV3CompactionPeriod: + description: + "EtcdV3CompactionPeriod is the period between etcdv3 + compaction requests. Set to 0 to disable. [Default: 10m]" + type: string + healthChecks: + description: + "HealthChecks enables or disables support for health + checks [Default: Enabled]" + type: string + logSeverityScreen: + description: + "LogSeverityScreen is the log severity above which + logs are sent to the stdout. [Default: Info]" + type: string + prometheusMetricsPort: + description: + "PrometheusMetricsPort is the TCP port that the Prometheus + metrics server should bind to. Set to 0 to disable. 
[Default: + 9094]" + type: integer + required: + - controllers + type: object + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networkpolicies.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkPolicy + listKind: NetworkPolicyList + plural: networkpolicies + singular: networkpolicy + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + properties: + egress: + description: + The ordered set of egress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: + "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: + Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: + "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: + Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: + NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: + NotPorts is the negated version of the Ports + field. 
Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: + NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: + "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: + "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: + ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: + Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: + Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: + "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: + Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: + Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. 
+ type: string + type: object + type: object + http: + description: + HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: + Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: + "Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR'd together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it." + items: + description: + "HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix" + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: + ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: + Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: + Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: + IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: + Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: + Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: + Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: + Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: + NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: + "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: + Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: + "NamespaceSelector is an optional field that + contains a selector expression. 
Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: + Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: + NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: + NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: + NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: + "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: + "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: + ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. 
+ properties: + names: + description: + Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: + Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: + "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: + Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: + Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + ingress: + description: + The ordered set of ingress rules. Each rule contains + a set of packet match criteria and a corresponding action to apply. + items: + description: + "A Rule encapsulates a set of match criteria and an + action. Both selector-based security Policy and security Profiles + reference rules - separated out as a list of rules for both ingress + and egress packet matching. \n Each positive match criteria has + a negated version, prefixed with \"Not\". All the match criteria + within a rule must be satisfied for a packet to match. A single + rule can contain the positive and negative version of a match + and both must be satisfied for the rule to match." + properties: + action: + type: string + destination: + description: + Destination contains the match criteria that apply + to destination entity. + properties: + namespaceSelector: + description: + "NamespaceSelector is an optional field that + contains a selector expression. Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: + Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: + NotNets is the negated version of the Nets + field. 
+ items: + type: string + type: array + notPorts: + description: + NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: + NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: + "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: + "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: + ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. + properties: + names: + description: + Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: + Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: + "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: + Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: + Namespace specifies the namespace of the + given Service. 
If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + http: + description: + HTTP contains match criteria that apply to HTTP + requests. + properties: + methods: + description: + Methods is an optional field that restricts + the rule to apply only to HTTP requests that use one of + the listed HTTP Methods (e.g. GET, PUT, etc.) Multiple + methods are OR'd together. + items: + type: string + type: array + paths: + description: + "Paths is an optional field that restricts + the rule to apply to HTTP requests that use one of the + listed HTTP Paths. Multiple paths are OR'd together. + e.g: - exact: /foo - prefix: /bar NOTE: Each entry may + ONLY specify either a `exact` or a `prefix` match. The + validator will check for it." + items: + description: + "HTTPPath specifies an HTTP path to match. + It may be either of the form: exact: : which matches + the path exactly or prefix: : which matches + the path prefix" + properties: + exact: + type: string + prefix: + type: string + type: object + type: array + type: object + icmp: + description: + ICMP is an optional field that restricts the rule + to apply to a specific type and code of ICMP traffic. This + should only be specified if the Protocol field is set to "ICMP" + or "ICMPv6". + properties: + code: + description: + Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: + Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + ipVersion: + description: + IPVersion is an optional field that restricts the + rule to only match a specific IP version. + type: integer + metadata: + description: + Metadata contains additional information for this + rule + properties: + annotations: + additionalProperties: + type: string + description: + Annotations is a set of key value pairs that + give extra information about the rule + type: object + type: object + notICMP: + description: NotICMP is the negated version of the ICMP field. + properties: + code: + description: + Match on a specific ICMP code. If specified, + the Type value must also be specified. This is a technical + limitation imposed by the kernel's iptables firewall, + which Calico uses to enforce the rule. + type: integer + type: + description: + Match on a specific ICMP type. For example + a value of 8 refers to ICMP Echo Request (i.e. pings). + type: integer + type: object + notProtocol: + anyOf: + - type: integer + - type: string + description: + NotProtocol is the negated version of the Protocol + field. + pattern: ^.* + x-kubernetes-int-or-string: true + protocol: + anyOf: + - type: integer + - type: string + description: + "Protocol is an optional field that restricts the + rule to only apply to traffic of a specific IP protocol. Required + if any of the EntityRules contain Ports (because ports only + apply to certain protocols). \n Must be one of these string + values: \"TCP\", \"UDP\", \"ICMP\", \"ICMPv6\", \"SCTP\", + \"UDPLite\" or an integer in the range 1-255." + pattern: ^.* + x-kubernetes-int-or-string: true + source: + description: + Source contains the match criteria that apply to + source entity. + properties: + namespaceSelector: + description: + "NamespaceSelector is an optional field that + contains a selector expression. 
Only traffic that originates + from (or terminates at) endpoints within the selected + namespaces will be matched. When both NamespaceSelector + and another selector are defined on the same rule, then + only workload endpoints that are matched by both selectors + will be selected by the rule. \n For NetworkPolicy, an + empty NamespaceSelector implies that the Selector is limited + to selecting only workload endpoints in the same namespace + as the NetworkPolicy. \n For NetworkPolicy, `global()` + NamespaceSelector implies that the Selector is limited + to selecting only GlobalNetworkSet or HostEndpoint. \n + For GlobalNetworkPolicy, an empty NamespaceSelector implies + the Selector applies to workload endpoints across all + namespaces." + type: string + nets: + description: + Nets is an optional field that restricts the + rule to only apply to traffic that originates from (or + terminates at) IP addresses in any of the given subnets. + items: + type: string + type: array + notNets: + description: + NotNets is the negated version of the Nets + field. + items: + type: string + type: array + notPorts: + description: + NotPorts is the negated version of the Ports + field. Since only some protocols have ports, if any ports + are specified it requires the Protocol match in the Rule + to be set to "TCP" or "UDP". + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + notSelector: + description: + NotSelector is the negated version of the Selector + field. See Selector field for subtleties with negated + selectors. + type: string + ports: + description: + "Ports is an optional field that restricts + the rule to only apply to traffic that has a source (destination) + port that matches one of these ranges/values. This value + is a list of integers or strings that represent ranges + of ports. \n Since only some protocols have ports, if + any ports are specified it requires the Protocol match + in the Rule to be set to \"TCP\" or \"UDP\"." + items: + anyOf: + - type: integer + - type: string + pattern: ^.* + x-kubernetes-int-or-string: true + type: array + selector: + description: + "Selector is an optional field that contains + a selector expression (see Policy for sample syntax). + \ Only traffic that originates from (terminates at) endpoints + matching the selector will be matched. \n Note that: in + addition to the negated version of the Selector (see NotSelector + below), the selector expression syntax itself supports + negation. The two types of negation are subtly different. + One negates the set of matched endpoints, the other negates + the whole match: \n \tSelector = \"!has(my_label)\" matches + packets that are from other Calico-controlled \tendpoints + that do not have the label \"my_label\". \n \tNotSelector + = \"has(my_label)\" matches packets that are not from + Calico-controlled \tendpoints that do have the label \"my_label\". + \n The effect is that the latter will accept packets from + non-Calico sources whereas the former is limited to packets + from Calico-controlled endpoints." + type: string + serviceAccounts: + description: + ServiceAccounts is an optional field that restricts + the rule to only apply to traffic that originates from + (or terminates at) a pod running as a matching service + account. 
+ properties: + names: + description: + Names is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account whose name is in the list. + items: + type: string + type: array + selector: + description: + Selector is an optional field that restricts + the rule to only apply to traffic that originates + from (or terminates at) a pod running as a service + account that matches the given label selector. If + both Names and Selector are specified then they are + AND'ed. + type: string + type: object + services: + description: + "Services is an optional field that contains + options for matching Kubernetes Services. If specified, + only traffic that originates from or terminates at endpoints + within the selected service(s) will be matched, and only + to/from each endpoint's port. \n Services cannot be specified + on the same rule as Selector, NotSelector, NamespaceSelector, + Nets, NotNets or ServiceAccounts. \n Ports and NotPorts + can only be specified with Services on ingress rules." + properties: + name: + description: + Name specifies the name of a Kubernetes + Service to match. + type: string + namespace: + description: + Namespace specifies the namespace of the + given Service. If left empty, the rule will match + within this policy's namespace. + type: string + type: object + type: object + required: + - action + type: object + type: array + order: + description: + Order is an optional field that specifies the order in + which the policy is applied. Policies with higher "order" are applied + after those with lower order. If the order is omitted, it may be + considered to be "infinite" - i.e. the policy will be applied last. Policies + with identical order will be applied in alphanumerical order based + on the Policy "Name". + type: number + selector: + description: + "The selector is an expression used to pick out + the endpoints that the policy should be applied to. \n Selector + expressions follow this syntax: \n \tlabel == \"string_literal\" + \ -> comparison, e.g. my_label == \"foo bar\" \tlabel != \"string_literal\" + \ -> not equal; also matches if label is not present \tlabel in + { \"a\", \"b\", \"c\", ... } -> true if the value of label X is + one of \"a\", \"b\", \"c\" \tlabel not in { \"a\", \"b\", \"c\", + ... } -> true if the value of label X is not one of \"a\", \"b\", + \"c\" \thas(label_name) -> True if that label is present \t! expr + -> negation of expr \texpr && expr -> Short-circuit and \texpr + || expr -> Short-circuit or \t( expr ) -> parens for grouping \tall() + or the empty selector -> matches all endpoints. \n Label names are + allowed to contain alphanumerics, -, _ and /. String literals are + more permissive but they do not support escape characters. \n Examples + (with made-up labels): \n \ttype == \"webserver\" && deployment + == \"prod\" \ttype in {\"frontend\", \"backend\"} \tdeployment != + \"dev\" \t! has(label_name)" + type: string + serviceAccountSelector: + description: + ServiceAccountSelector is an optional field for an expression + used to select a pod based on service accounts. + type: string + types: + description: + "Types indicates whether this policy applies to ingress, + or to egress, or to both. When not explicitly specified (and so + the value on creation is empty or nil), Calico defaults Types according + to what Ingress and Egress rules are present in the policy.
The default + is: \n - [ PolicyTypeIngress ], if there are no Egress rules (including + the case where there are also no Ingress rules) \n - [ PolicyTypeEgress + ], if there are Egress rules but no Ingress rules \n - [ PolicyTypeIngress, + PolicyTypeEgress ], if there are both Ingress and Egress rules. + \n When the policy is read back again, Types will always be one + of these values, never empty or nil." + items: + description: + PolicyType enumerates the possible values of the PolicySpec + Types field. + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/kdd-crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: networksets.crd.projectcalico.org +spec: + group: crd.projectcalico.org + names: + kind: NetworkSet + listKind: NetworkSetList + plural: networksets + singular: networkset + preserveUnknownFields: false + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: NetworkSet is the Namespaced-equivalent of the GlobalNetworkSet. + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + NetworkSetSpec contains the specification for a NetworkSet + resource. + properties: + nets: + description: The list of IP networks that belong to this set. + items: + type: string + type: array + type: object + type: object + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +# Include a clusterrole for the kube-controllers component, +# and bind it to the calico-kube-controllers serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +rules: + # Nodes are watched to monitor for deletions. + - apiGroups: [""] + resources: + - nodes + verbs: + - watch + - list + - get + # Pods are watched to check for existence as part of IPAM controller. + - apiGroups: [""] + resources: + - pods + verbs: + - get + - list + - watch + # IPAM resources are manipulated in response to node and block updates, as well as periodic triggers. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipreservations + verbs: + - list + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + - watch + # Pools are watched to maintain a mapping of blocks to IP pools. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + verbs: + - list + - watch + # kube-controllers manages hostendpoints. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - hostendpoints + verbs: + - get + - list + - create + - update + - delete + # Needs access to update clusterinformations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - clusterinformations + verbs: + - get + - list + - create + - update + - watch + # KubeControllersConfiguration is where it gets its config + - apiGroups: ["crd.projectcalico.org"] + resources: + - kubecontrollersconfigurations + verbs: + # read its own config + - get + # create a default if none exists + - create + # update status + - update + # watch for changes + - watch +--- +# Source: calico/templates/calico-node-rbac.yaml +# Include a clusterrole for the calico-node DaemonSet, +# and bind it to the calico-node serviceaccount. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-node +rules: + # Used for creating service account tokens to be used by the CNI plugin + - apiGroups: [""] + resources: + - serviceaccounts/token + resourceNames: + - calico-node + verbs: + - create + # The CNI plugin needs to get pods, nodes, and namespaces. + - apiGroups: [""] + resources: + - pods + - nodes + - namespaces + verbs: + - get + # EndpointSlices are used for Service-based network policy rule + # enforcement. + - apiGroups: ["discovery.k8s.io"] + resources: + - endpointslices + verbs: + - watch + - list + - apiGroups: [""] + resources: + - endpoints + - services + verbs: + # Used to discover service IPs for advertisement. + - watch + - list + # Used to discover Typhas. + - get + # Pod CIDR auto-detection on kubeadm needs access to config maps. + - apiGroups: [""] + resources: + - configmaps + verbs: + - get + - apiGroups: [""] + resources: + - nodes/status + verbs: + # Needed for clearing NodeNetworkUnavailable flag. + - patch + # Calico stores some configuration information in node annotations. + - update + # Watch for changes to Kubernetes NetworkPolicies. + - apiGroups: ["networking.k8s.io"] + resources: + - networkpolicies + verbs: + - watch + - list + # Used by Calico for policy information. + - apiGroups: [""] + resources: + - pods + - namespaces + - serviceaccounts + verbs: + - list + - watch + # The CNI plugin patches pods/status. + - apiGroups: [""] + resources: + - pods/status + verbs: + - patch + # Calico monitors various CRDs for config. + - apiGroups: ["crd.projectcalico.org"] + resources: + - globalfelixconfigs + - felixconfigurations + - bgppeers + - globalbgpconfigs + - bgpconfigurations + - ippools + - ipreservations + - ipamblocks + - globalnetworkpolicies + - globalnetworksets + - networkpolicies + - networksets + - clusterinformations + - hostendpoints + - blockaffinities + - caliconodestatuses + verbs: + - get + - list + - watch + # Calico must create and update some CRDs on startup. + - apiGroups: ["crd.projectcalico.org"] + resources: + - ippools + - felixconfigurations + - clusterinformations + verbs: + - create + - update + # Calico must update some CRDs. + - apiGroups: ["crd.projectcalico.org"] + resources: + - caliconodestatuses + verbs: + - update + # Calico stores some configuration information on the node. + - apiGroups: [""] + resources: + - nodes + verbs: + - get + - list + - watch + # These permissions are only required for upgrade from v2.6, and can + # be removed after upgrade or on fresh installations. 
+ - apiGroups: ["crd.projectcalico.org"] + resources: + - bgpconfigurations + - bgppeers + verbs: + - create + - update + # These permissions are required for Calico CNI to perform IPAM allocations. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + - ipamblocks + - ipamhandles + verbs: + - get + - list + - create + - update + - delete + # The CNI plugin and calico/node need to be able to create a default + # IPAMConfiguration + - apiGroups: ["crd.projectcalico.org"] + resources: + - ipamconfigs + verbs: + - get + - create + # Block affinities must also be watchable by confd for route aggregation. + - apiGroups: ["crd.projectcalico.org"] + resources: + - blockaffinities + verbs: + - watch + # The Calico IPAM migration needs to get daemonsets. These permissions can be + # removed if not upgrading from an installation using host-local IPAM. + - apiGroups: ["apps"] + resources: + - daemonsets + verbs: + - get +--- +# Source: calico/templates/calico-kube-controllers-rbac.yaml +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: calico-kube-controllers +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-kube-controllers +subjects: + - kind: ServiceAccount + name: calico-kube-controllers + namespace: kube-system +--- +# Source: calico/templates/calico-node-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: calico-node +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-node +subjects: + - kind: ServiceAccount + name: calico-node + namespace: kube-system +--- +# Source: calico/templates/calico-node.yaml +# This manifest installs the calico-node container, as well +# as the CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: apps/v1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + spec: + nodeSelector: + kubernetes.io/os: linux + hostNetwork: true + tolerations: + # Make sure calico-node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 + priorityClassName: system-node-critical + initContainers: + # This container performs upgrade from host-local IPAM to calico-ipam. + # It can be deleted if this is a fresh installation, or if you have already + # upgraded to use calico-ipam. + - name: upgrade-ipam + image: docker.io/calico/cni:v3.25.0 + imagePullPolicy: IfNotPresent + command: ["/opt/cni/bin/calico-ipam", "-upgrade"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. 
+ name: kubernetes-services-endpoint + optional: true + env: + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + volumeMounts: + - mountPath: /var/lib/cni/networks + name: host-local-net-dir + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + securityContext: + privileged: true + # This container installs the CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: docker.io/calico/cni:v3.25.0 + imagePullPolicy: IfNotPresent + command: ["/opt/cni/bin/install"] + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + # Set the hostname based on the k8s node name. + - name: KUBERNETES_NODE_NAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # CNI MTU Config variable + - name: CNI_MTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Prevents the container from sleeping forever. + - name: SLEEP + value: "false" + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + securityContext: + privileged: true + # This init container mounts the necessary filesystems needed by the BPF data plane + # i.e. bpf at /sys/fs/bpf and cgroup2 at /run/calico/cgroup. Calico-node initialisation is executed + # in best effort fashion, i.e. no failure for errors, to not disrupt pod creation in iptable mode. + - name: "mount-bpffs" + image: docker.io/calico/node:v3.25.0 + imagePullPolicy: IfNotPresent + command: ["calico-node", "-init", "-best-effort"] + volumeMounts: + - mountPath: /sys/fs + name: sys-fs + # Bidirectional is required to ensure that the new mount we make at /sys/fs/bpf propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + - mountPath: /var/run/calico + name: var-run-calico + # Bidirectional is required to ensure that the new mount we make at /run/calico/cgroup propagates to the host + # so that it outlives the init container. + mountPropagation: Bidirectional + # Mount /proc/ from host which usually is an init program at /nodeproc. It's needed by mountns binary, + # executed by calico-node, to mount root cgroup2 fs at /run/calico/cgroup to attach CTLB programs correctly. + - mountPath: /nodeproc + name: nodeproc + readOnly: true + securityContext: + privileged: true + containers: + # Runs calico-node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: docker.io/calico/node:v3.25.0 + imagePullPolicy: IfNotPresent + envFrom: + - configMapRef: + # Allow KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT to be overridden for eBPF mode. + name: kubernetes-services-endpoint + optional: true + env: + # Use Kubernetes API as the backing datastore. + - name: DATASTORE_TYPE + value: "kubernetes" + # Wait for the datastore. + - name: WAIT_FOR_DATASTORE + value: "true" + # Set based on the k8s node name. + - name: NODENAME + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Choose the backend to use. 
+ - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Auto-detect the BGP IP address. + - name: IP + value: "autodetect" + # Enable IPIP + - name: CALICO_IPV4POOL_IPIP + value: "Always" + # Enable or Disable VXLAN on the default IP pool. + - name: CALICO_IPV4POOL_VXLAN + value: "Never" + # Enable or Disable VXLAN on the default IPv6 IP pool. + - name: CALICO_IPV6POOL_VXLAN + value: "Never" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the VXLAN tunnel device. + - name: FELIX_VXLANMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # Set MTU for the Wireguard tunnel device. + - name: FELIX_WIREGUARDMTU + valueFrom: + configMapKeyRef: + name: calico-config + key: veth_mtu + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within `--cluster-cidr`. + # - name: CALICO_IPV4POOL_CIDR + # value: "192.168.0.0/16" + # Disable file logging so `kubectl logs` works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set Felix endpoint to host default action to ACCEPT. + - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + lifecycle: + preStop: + exec: + command: + - /bin/calico-node + - -shutdown + livenessProbe: + exec: + command: + - /bin/calico-node + - -felix-live + - -bird-live + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /bin/calico-node + - -felix-ready + - -bird-ready + periodSeconds: 10 + timeoutSeconds: 10 + volumeMounts: + # For maintaining CNI plugin API credentials. + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + readOnly: false + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /run/xtables.lock + name: xtables-lock + readOnly: false + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - name: policysync + mountPath: /var/run/nodeagent + # For eBPF mode, we need to be able to mount the BPF filesystem at /sys/fs/bpf so we mount in the + # parent directory. + - name: bpffs + mountPath: /sys/fs/bpf + - name: cni-log-dir + mountPath: /var/log/calico/cni + readOnly: true + volumes: + # Used by calico-node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + - name: xtables-lock + hostPath: + path: /run/xtables.lock + type: FileOrCreate + - name: sys-fs + hostPath: + path: /sys/fs/ + type: DirectoryOrCreate + - name: bpffs + hostPath: + path: /sys/fs/bpf + type: Directory + # mount /proc at /nodeproc to be used by mount-bpffs initContainer to mount root cgroup2 fs. + - name: nodeproc + hostPath: + path: /proc + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Used to access CNI logs. 
+ - name: cni-log-dir + hostPath: + path: /var/log/calico/cni + # Mount in the directory for host-local IPAM allocations. This is + # used when upgrading from host-local to calico-ipam, and can be removed + # if not using the upgrade-ipam init container. + - name: host-local-net-dir + hostPath: + path: /var/lib/cni/networks + # Used to create per-pod Unix Domain Sockets + - name: policysync + hostPath: + type: DirectoryOrCreate + path: /var/run/nodeagent +--- +# Source: calico/templates/calico-kube-controllers.yaml +# See https://github.com/projectcalico/kube-controllers +apiVersion: apps/v1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers +spec: + # The controllers can only have a single active instance. + replicas: 1 + selector: + matchLabels: + k8s-app: calico-kube-controllers + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + nodeSelector: + kubernetes.io/os: linux + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule + serviceAccountName: calico-kube-controllers + priorityClassName: system-cluster-critical + containers: + - name: calico-kube-controllers + image: docker.io/calico/kube-controllers:v3.25.0 + imagePullPolicy: IfNotPresent + env: + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: node + - name: DATASTORE_TYPE + value: kubernetes + livenessProbe: + exec: + command: + - /usr/bin/check-status + - -l + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + timeoutSeconds: 10 + readinessProbe: + exec: + command: + - /usr/bin/check-status + - -r + periodSeconds: 10 diff --git a/config/datacoveslocal.com/base/cert-manager-v1.11.0.yaml b/config/datacoveslocal.com/base/cert-manager-v1.11.0.yaml new file mode 100644 index 00000000..1a56ff3b --- /dev/null +++ b/config/datacoveslocal.com/base/cert-manager-v1.11.0.yaml @@ -0,0 +1,5546 @@ +# https://github.com/jetstack/cert-manager/releases/download/v1.11.0/cert-manager.yaml + +# Copyright 2022 The cert-manager Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterissuers.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: cert-manager.io + names: + kind: ClusterIssuer + listKind: ClusterIssuerList + plural: clusterissuers + singular: clusterissuer + categories: + - cert-manager + scope: Cluster + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent. + type: object + required: + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: Desired state of the ClusterIssuer resource. + type: object + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + type: object + required: + - privateKeySecretRef + - server + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which can be used to validate the certificate chain presented by the ACME server. Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various kinds of security vulnerabilities. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. + type: string + format: byte + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. 
It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + type: object + required: + - keyID + - keySecretRef + properties: + keyAlgorithm: + description: "Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme." + type: string + enum: + - HS256 + - HS384 + - HS512 + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + type: string + maxLength: 64 + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". 
Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: "INSECURE: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have the TLS certificate chain validated. Mutually exclusive with CABundle; prefer using CABundle to prevent various kinds of security vulnerabilities. Only enable this option in development environments. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. Defaults to false." + type: boolean + solvers: + description: "Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/" + type: array + items: + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. 
In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: "API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions." 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: "The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``." + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. 
+ type: object + required: + - region + properties: + accessKeyID: + description: "The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: string + accessKeyIDSecretRef: + description: "The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: "The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: "When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways" + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. 
\n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. 
+ type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
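+ # Example (illustrative sketch, not part of the generated schema): spreading ACME HTTP01 solver pods
+ # apart using the podAntiAffinity fields described here, inside the solver's podTemplate.spec.
+ # The label key/value and topology key below are placeholders.
+ # podTemplate:
+ #   spec:
+ #     affinity:
+ #       podAntiAffinity:
+ #         preferredDuringSchedulingIgnoredDuringExecution:
+ #         - weight: 100
+ #           podAffinityTerm:
+ #             topologyKey: kubernetes.io/hostname
+ #             labelSelector:
+ #               matchLabels:
+ #                 app: acme-solver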
+ type: string + nodeSelector: + description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
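+ # Example (illustrative sketch, not part of the generated schema): using the podTemplate.spec fields
+ # described above on an ACME HTTP01 ingress solver (typically under spec.acme.solvers of an
+ # Issuer/ClusterIssuer) to pin solver pods to particular nodes. The ingress class, node label and
+ # toleration values are placeholders.
+ # solvers:
+ # - http01:
+ #     ingress:
+ #       class: nginx
+ #       podTemplate:
+ #         spec:
+ #           nodeSelector:
+ #             kubernetes.io/os: linux
+ #           tolerations:
+ #           - key: node-role.kubernetes.io/control-plane
+ #             operator: Exists
+ #             effect: NoSchedule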
+ type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + type: object + required: + - secretName + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + type: array + items: + type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + type: object + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + type: array + items: + type: string + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + type: object + required: + - auth + - path + - server + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + type: object + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. 
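+ # Example (illustrative sketch, not part of the generated schema): a minimal ClusterIssuer using the
+ # `ca` issuer type described above. The resource and Secret names are placeholders; the Secret is
+ # expected to hold the signing CA keypair.
+ # apiVersion: cert-manager.io/v1
+ # kind: ClusterIssuer
+ # metadata:
+ #   name: internal-ca
+ # spec:
+ #   ca:
+ #     secretName: internal-ca-keypair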
+ type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by Vault. Only used if using HTTPS to connect to Vault and ignored for HTTP connections. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. + type: string + format: byte + caBundleSecretRef: + description: Reference to a Secret containing a bundle of PEM-encoded CAs to use when verifying the certificate chain presented by Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' 
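+ # Example (illustrative sketch, not part of the generated schema): a ClusterIssuer that signs through
+ # a Vault PKI backend using the Kubernetes auth method described above. The server URL and sign path
+ # follow the field descriptions; the role, Secret name and key are placeholders.
+ # apiVersion: cert-manager.io/v1
+ # kind: ClusterIssuer
+ # metadata:
+ #   name: vault-issuer
+ # spec:
+ #   vault:
+ #     server: https://vault.example.com:8200
+ #     path: my_pki_mount/sign/my-role-name
+ #     auth:
+ #       kubernetes:
+ #         role: cert-manager
+ #         secretRef:
+ #           name: cert-manager-vault-token
+ #           key: token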
+ type: string + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. If undefined, the certificate bundle in the cert-manager controller container is used to validate the chain. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + status: + description: Status of the ClusterIssuer. This is set and managed automatically. + type: object + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + type: array + items: + description: IssuerCondition contains condition information for an Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. 
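+ # Example (illustrative sketch, not part of the generated schema): a ClusterIssuer backed by Venafi
+ # TPP as described above. The zone, URL and Secret name are placeholders; the referenced Secret must
+ # carry 'username' and 'password' keys per the credentialsRef description.
+ # apiVersion: cert-manager.io/v1
+ # kind: ClusterIssuer
+ # metadata:
+ #   name: venafi-tpp-issuer
+ # spec:
+ #   venafi:
+ #     zone: "My Policy Zone"
+ #     tpp:
+ #       url: https://tpp.example.com/vedsdk
+ #       credentialsRef:
+ #         name: tpp-credentials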
+ type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: challenges.acme.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: acme.cert-manager.io + names: + kind: Challenge + listKind: ChallengeList + plural: challenges + singular: challenge + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.dnsName + name: Domain + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + type: object + required: + - authorizationURL + - dnsName + - issuerRef + - key + - solver + - token + - type + - url + properties: + authorizationURL: + description: The URL to the ACME Authorization resource that this challenge is a part of. + type: string + dnsName: + description: dnsName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. 
+ type: string + issuerRef: + description: References a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + key: + description: "The ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `.` text that must be set as the TXT record content." + type: string + solver: + description: Contains the domain solving configuration that should be used to solve this challenge resource. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. 
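+ # Example (illustrative sketch, not part of the generated schema): an acme-dns DNS01 solver entry as
+ # it would normally be written in an ACME Issuer's spec.acme.solvers list (Challenge resources
+ # themselves are created by cert-manager). Host, Secret name and key are placeholders.
+ # solvers:
+ # - dns01:
+ #     acmeDNS:
+ #       host: https://acme-dns.example.com
+ #       accountSecretRef:
+ #         name: acme-dns-account
+ #         key: acmedns.json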
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: "API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions." 
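+ # Example (illustrative sketch, not part of the generated schema): a Google Cloud DNS DNS01 solver
+ # entry built from the cloudDNS fields above. Project, Secret name and key are placeholders.
+ # solvers:
+ # - dns01:
+ #     cloudDNS:
+ #       project: my-gcp-project
+ #       serviceAccountSecretRef:
+ #         name: clouddns-dns01-sa
+ #         key: key.json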
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: "The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``." + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. 
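+ # Example (illustrative sketch, not part of the generated schema): a Cloudflare DNS01 solver entry
+ # using API-token authentication, which the description above recommends over API keys. The Secret
+ # name and key are placeholders.
+ # solvers:
+ # - dns01:
+ #     cloudflare:
+ #       apiTokenSecretRef:
+ #         name: cloudflare-api-token
+ #         key: api-token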
+ type: object + required: + - region + properties: + accessKeyID: + description: "The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: string + accessKeyIDSecretRef: + description: "The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: "The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
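+ # Example (illustrative sketch, not part of the generated schema): a Route53 DNS01 solver entry with
+ # an explicit access key; omitting accessKeyID and secretAccessKeySecretRef falls back to ambient
+ # credentials as described above. Region, key ID and Secret name are placeholders.
+ # solvers:
+ # - dns01:
+ #     route53:
+ #       region: us-east-1
+ #       accessKeyID: AKIAIOSFODNN7EXAMPLE
+ #       secretAccessKeySecretRef:
+ #         name: route53-credentials
+ #         key: secret-access-key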
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: "When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways" + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. 
\n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. 
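+ # Example (illustrative sketch, not part of the generated schema): an HTTP01 solver entry using the
+ # experimental gatewayHTTPRoute option above, attaching the generated HTTPRoute to an existing
+ # Gateway. The Gateway name and namespace are placeholders.
+ # solvers:
+ # - http01:
+ #     gatewayHTTPRoute:
+ #       parentRefs:
+ #       - kind: Gateway
+ #         name: shared-gateway
+ #         namespace: gateway-system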
+ type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
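+ # Example (illustrative sketch, not part of the generated schema): an HTTP01 ingress solver entry
+ # that sets the ingress class and adds a label to the generated solver ingress via the
+ # ingressTemplate fields above. Class and label values are placeholders.
+ # solvers:
+ # - http01:
+ #     ingress:
+ #       class: nginx
+ #       ingressTemplate:
+ #         metadata:
+ #           labels:
+ #             purpose: acme-solver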
+ type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + nodeSelector: + description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
+ type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + token: + description: The ACME challenge token for this challenge. This is the raw value returned from the ACME server. + type: string + type: + description: The type of ACME challenge this resource represents. One of "HTTP-01" or "DNS-01". + type: string + enum: + - HTTP-01 + - DNS-01 + url: + description: The URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge. + type: string + wildcard: + description: wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'. + type: boolean + status: + type: object + properties: + presented: + description: presented will be set to true if the challenge values for this challenge are currently 'presented'. This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured). + type: boolean + processing: + description: Used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action. + type: boolean + reason: + description: Contains human readable information on why the Challenge is in the current state. + type: string + state: + description: Contains the current 'state' of the challenge. If not set, the state of the challenge is unknown. + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + served: true + storage: true + subresources: + status: {} +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: certificaterequests.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: cert-manager.io + names: + kind: CertificateRequest + listKind: CertificateRequestList + plural: certificaterequests + shortNames: + - cr + - crs + singular: certificaterequest + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .status.conditions[?(@.type=="Denied")].status + name: Denied + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + type: string + - jsonPath: .spec.username + name: Requestor + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. 
+ name: Age + type: date + schema: + openAPIV3Schema: + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + type: object + required: + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: Desired state of the CertificateRequest resource. + type: object + required: + - issuerRef + - request + properties: + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. + type: string + extra: + description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: object + additionalProperties: + type: array + items: + type: string + groups: + description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: array + items: + type: string + x-kubernetes-list-type: atomic + isCA: + description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + request: + description: The PEM-encoded x509 certificate signing request to be submitted to the CA for signing. + type: string + format: byte + uid: + description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. 
If usages are set they SHOULD be encoded inside the CSR spec Defaults to `digital signature` and `key encipherment` if not specified. + type: array + items: + description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + username: + description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + status: + description: Status of the CertificateRequest. This is set and managed automatically. + type: object + properties: + ca: + description: The PEM encoded x509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available. + type: string + format: byte + certificate: + description: The PEM encoded x509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field. + type: string + format: byte + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready` and `InvalidRequest`. + type: array + items: + description: CertificateRequestCondition contains condition information for a CertificateRequest. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failureTime: + description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. 
+ type: string + format: date-time + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: issuers.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: cert-manager.io + names: + kind: Issuer + listKind: IssuerList + plural: issuers + singular: issuer + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace. + type: object + required: + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: Desired state of the Issuer resource. + type: object + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + type: object + required: + - privateKeySecretRef + - server + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which can be used to validate the certificate chain presented by the ACME server. Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various kinds of security vulnerabilities. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. + type: string + format: byte + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. 
This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + type: object + required: + - keyID + - keySecretRef + properties: + keyAlgorithm: + description: "Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme." + type: string + enum: + - HS256 + - HS384 + - HS512 + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + type: string + maxLength: 64 + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' 
+ type: string + skipTLSVerify: + description: "INSECURE: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have the TLS certificate chain validated. Mutually exclusive with CABundle; prefer using CABundle to prevent various kinds of security vulnerabilities. Only enable this option in development environments. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. Defaults to false." + type: boolean + solvers: + description: "Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/" + type: array + items: + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: "API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions." 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: "The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``." + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. 
+ type: object + required: + - region + properties: + accessKeyID: + description: "The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: string + accessKeyIDSecretRef: + description: "The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: "The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: "When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways" + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. 
\n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. 
+ type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + nodeSelector: + description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
+ type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + type: object + required: + - secretName + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + type: array + items: + type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + type: object + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + type: array + items: + type: string + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + type: object + required: + - auth + - path + - server + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + type: object + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. 
+ type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by Vault. Only used if using HTTPS to connect to Vault and ignored for HTTP connections. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. + type: string + format: byte + caBundleSecretRef: + description: Reference to a Secret containing a bundle of PEM-encoded CAs to use when verifying the certificate chain presented by Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' 
+ type: string + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. If undefined, the certificate bundle in the cert-manager controller container is used to validate the chain. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + status: + description: Status of the Issuer. This is set and managed automatically. + type: object + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + type: array + items: + description: IssuerCondition contains condition information for an Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. 
+ type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: certificates.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: cert-manager.io + names: + kind: Certificate + listKind: CertificateList + plural: certificates + shortNames: + - cert + - certs + singular: certificate + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.secretName + name: Secret + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: "A Certificate resource should be created to ensure an up to date and signed x509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. \n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)." + type: object + required: + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: Desired state of the Certificate resource. + type: object + required: + - issuerRef + - secretName + properties: + additionalOutputFormats: + description: AdditionalOutputFormats defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. 
This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option on both the controller and webhook components. + type: array + items: + description: CertificateAdditionalOutputFormat defines an additional output format of a Certificate resource. These contain supplementary data formats of the signed certificate chain and paired private key. + type: object + required: + - type + properties: + type: + description: Type is the name of the format type that should be written to the Certificate's target Secret. + type: string + enum: + - DER + - CombinedPEM + commonName: + description: "CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4" + type: string + dnsNames: + description: DNSNames is a list of DNS subjectAltNames to be set on the Certificate. + type: array + items: + type: string + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + emailAddresses: + description: EmailAddresses is a list of email subjectAltNames to be set on the Certificate. + type: array + items: + type: string + encodeUsagesInRequest: + description: EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest + type: boolean + ipAddresses: + description: IPAddresses is a list of IP address subjectAltNames to be set on the Certificate. + type: array + items: + type: string + isCA: + description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + keystores: + description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. + type: object + properties: + jks: + description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. + type: object + required: + - create + - passwordSecretRef + properties: + create: + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. 
A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + pkcs12: + description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. + type: object + required: + - create + - passwordSecretRef + properties: + create: + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + literalSubject: + description: LiteralSubject is an LDAP formatted string that represents the [X.509 Subject field](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6). Use this *instead* of the Subject field if you need to ensure the correct ordering of the RDN sequence, such as when issuing certs for LDAP authentication. See https://github.com/cert-manager/cert-manager/issues/3203, https://github.com/cert-manager/cert-manager/issues/4424. This field is alpha level and is only supported by cert-manager installations where LiteralCertificateSubject feature gate is enabled on both cert-manager controller and webhook. + type: string + privateKey: + description: Options to control private keys used for the Certificate. + type: object + properties: + algorithm: + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `RSA`,`Ed25519` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. key size is ignored when using the `Ed25519` key algorithm. + type: string + enum: + - RSA + - ECDSA + - Ed25519 + encoding: + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. 
+ type: string + enum: + - PKCS1 + - PKCS8 + rotationPolicy: + description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. + type: string + enum: + - Never + - Always + size: + description: Size is the key bit size of the corresponding private key for this certificate. If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If `algorithm` is set to `Ed25519`, Size is ignored. No other values are allowed. + type: integer + renewBefore: + description: How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + revisionHistoryLimit: + description: revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`. + type: integer + format: int32 + secretName: + description: SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. + type: string + secretTemplate: + description: SecretTemplate defines annotations and labels to be copied to the Certificate's Secret. Labels and annotations on the Secret will be changed as they appear on the SecretTemplate when added or removed. SecretTemplate annotations are added in conjunction with, and cannot overwrite, the base set of annotations cert-manager sets on the Certificate's Secret. + type: object + properties: + annotations: + description: Annotations is a key value map to be copied to the target Kubernetes Secret. + type: object + additionalProperties: + type: string + labels: + description: Labels is a key value map to be copied to the target Kubernetes Secret. + type: object + additionalProperties: + type: string + subject: + description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). + type: object + properties: + countries: + description: Countries to be used on the Certificate. + type: array + items: + type: string + localities: + description: Cities to be used on the Certificate. + type: array + items: + type: string + organizationalUnits: + description: Organizational Units to be used on the Certificate. + type: array + items: + type: string + organizations: + description: Organizations to be used on the Certificate. 
+ type: array + items: + type: string + postalCodes: + description: Postal codes to be used on the Certificate. + type: array + items: + type: string + provinces: + description: State/Provinces to be used on the Certificate. + type: array + items: + type: string + serialNumber: + description: Serial number to be used on the Certificate. + type: string + streetAddresses: + description: Street addresses to be used on the Certificate. + type: array + items: + type: string + uris: + description: URIs is a list of URI subjectAltNames to be set on the Certificate. + type: array + items: + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + type: array + items: + description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + status: + description: Status of the Certificate. This is set and managed automatically. + type: object + properties: + conditions: + description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`. + type: array + items: + description: CertificateCondition contains condition information for an Certificate. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`, `Issuing`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failedIssuanceAttempts: + description: The number of continuous failed issuance attempts up till now. 
This field gets removed (if set) on a successful issuance and gets set to 1 if unset and an issuance has failed. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). + type: integer + lastFailureTime: + description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. + type: string + format: date-time + nextPrivateKeySecretName: + description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False. + type: string + notAfter: + description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`. + type: string + format: date-time + notBefore: + description: The time after which the certificate stored in the secret named by this resource in spec.secretName is valid. + type: string + format: date-time + renewalTime: + description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled. + type: string + format: date-time + revision: + description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field." + type: integer + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: orders.acme.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: acme.cert-manager.io + names: + kind: Order + listKind: OrderList + plural: orders + singular: order + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. 
+ name: Age + type: date + schema: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + type: object + required: + - issuerRef + - request + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR. + type: string + dnsNames: + description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + type: array + items: + type: string + duration: + description: Duration is the duration for the not after date for the requested certificate. this is set on order creation as pe the ACME spec. + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + type: array + items: + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + request: + description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order. + type: string + format: byte + status: + type: object + properties: + authorizations: + description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order. + type: array + items: + description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order validate a DNS name on an ACME Order resource. + type: object + required: + - url + properties: + challenges: + description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process. + type: array + items: + description: Challenge specifies a challenge offered by the ACME server for an Order. 
An appropriate Challenge resource can be created to perform the ACME challenge process. + type: object + required: + - token + - type + - url + properties: + token: + description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented. + type: string + type: + description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored. + type: string + url: + description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server. + type: string + identifier: + description: Identifier is the DNS name to be validated as part of this authorization + type: string + initialState: + description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created. + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL is the URL of the Authorization that must be completed + type: string + wildcard: + description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + certificate: + description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state. + type: string + format: byte + failureTime: + description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off. + type: string + format: date-time + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final' + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL of the Order. This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set. 
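+ # Illustrative sketch (YAML comments only, not part of the rendered cert-manager manifest):
+ # Orders like those described by this CRD are created automatically by cert-manager when a
+ # Certificate is issued through an ACME issuer. A minimal Certificate using spec fields
+ # documented in the certificates CRD above (secretName, renewBefore, privateKey); the names
+ # "example-tls", "app.datacoveslocal.com" and "letsencrypt-prod" are placeholders, not
+ # resources defined in this repo:
+ #
+ #   apiVersion: cert-manager.io/v1
+ #   kind: Certificate
+ #   metadata:
+ #     name: example-tls
+ #   spec:
+ #     secretName: example-tls           # Secret that will hold the key and certificate
+ #     dnsNames:
+ #       - app.datacoveslocal.com        # placeholder DNS name
+ #     issuerRef:
+ #       name: letsencrypt-prod          # placeholder ACME ClusterIssuer
+ #       kind: ClusterIssuer
+ #     renewBefore: 360h                 # renew 15 days before expiry
+ #     privateKey:
+ #       algorithm: RSA
+ #       size: 2048
+ #       rotationPolicy: Always          # regenerate the private key on each re-issuance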
+ type: string + served: true + storage: true +--- +# Source: cert-manager/templates/cainjector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-cainjector + namespace: cert-manager + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +--- +# Source: cert-manager/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +--- +# Source: cert-manager/templates/webhook-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +--- +# Source: cert-manager/templates/webhook-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +data: +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: + ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Issuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "issuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ClusterIssuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + 
name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "clusterissuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Certificates controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: + [ + "certificates", + "certificates/status", + "certificaterequests", + "certificaterequests/status", + ] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: + ["certificates", "certificaterequests", "clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["cert-manager.io"] + resources: ["certificates/finalizers", "certificaterequests/finalizers"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders"] + verbs: ["create", "delete", "get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Orders controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "orders/status"] + verbs: ["update", "patch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "challenges"] + verbs: ["get", "list", "watch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["create", "delete"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Challenges controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: 
cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + # Use to update challenge resource status + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "challenges/status"] + verbs: ["update", "patch"] + # Used to watch challenge resources + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["get", "list", "watch"] + # Used to watch challenges, issuer and clusterissuer resources + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + # Need to be able to retrieve ACME account private key to complete challenges + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Used to create events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + # HTTP01 rules + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["httproutes"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + # We require the ability to specify a custom hostname when we are creating + # new ingress resources. + # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148 + - apiGroups: ["route.openshift.io"] + resources: ["routes/custom-host"] + verbs: ["create"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges/finalizers"] + verbs: ["update"] + # DNS01 rules (duplicated above) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ingress-shim controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests"] + verbs: ["create", "update", "delete"] + - apiGroups: ["cert-manager.io"] + resources: + ["certificates", "certificaterequests", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses/finalizers"] + verbs: ["update"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways", "httproutes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways/finalizers", "httproutes/finalizers"] + verbs: ["update"] + - apiGroups: 
[""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-view + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-edit + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates/status"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["signers"] + verbs: ["approve"] + resourceNames: + ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to: +# - Update and sign CertificatSigningeRequests referencing cert-manager.io Issuers and ClusterIssuers +# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["certificates.k8s.io"] + resources: ["signers"] + resourceNames: + ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] + verbs: ["sign"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: 
ClusterRole +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cainjector +subjects: + - name: cert-manager-cainjector + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-issuers +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-clusterissuers +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificates +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-orders +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-challenges +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-ingress-shim +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-approve:cert-manager-io +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificatesigningrequests +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-webhook:subjectaccessreviews +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# leader election rules +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +rules: + # Used for leader election by the controller + # cert-manager-cainjector-leader-election is used by the CertificateBased injector controller + # see cmd/cainjector/start.go#L113 + # cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller + # see cmd/cainjector/start.go#L137 + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: + [ + "cert-manager-cainjector-leader-election", + "cert-manager-cainjector-leader-election-core", + ] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# 
Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-controller"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: [""] + resources: ["secrets"] + resourceNames: + - "cert-manager-webhook-ca" + verbs: ["get", "list", "watch", "update"] + # It's not possible to grant CREATE permission on a single resourceName. + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-cainjector:leaderelection +subjects: + - kind: ServiceAccount + name: cert-manager-cainjector + namespace: cert-manager +--- +# Source: cert-manager/templates/rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager:leaderelection +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-webhook:dynamic-serving +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +# Source: cert-manager/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + 
app.kubernetes.io/version: "v1.11.0" +spec: + type: ClusterIP + ports: + - protocol: TCP + port: 9402 + name: tcp-prometheus-servicemonitor + targetPort: 9402 + selector: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" +--- +# Source: cert-manager/templates/webhook-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +spec: + type: ClusterIP + ports: + - name: https + port: 443 + protocol: TCP + targetPort: "https" + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" +--- +# Source: cert-manager/templates/cainjector-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-cainjector + namespace: cert-manager + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + template: + metadata: + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + spec: + serviceAccountName: cert-manager-cainjector + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-cainjector + image: "quay.io/jetstack/cert-manager-cainjector:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --leader-election-namespace=kube-system + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + template: + metadata: + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + annotations: + prometheus.io/path: "/metrics" + prometheus.io/scrape: "true" + prometheus.io/port: "9402" + spec: + serviceAccountName: cert-manager + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-controller + image: "quay.io/jetstack/cert-manager-controller:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --cluster-resource-namespace=$(POD_NAMESPACE) + - --leader-election-namespace=kube-system + - --acme-http01-solver-image=quay.io/jetstack/cert-manager-acmesolver:v1.11.0 + - --max-concurrent-challenges=60 + ports: + - containerPort: 9402 + name: 
http-metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/webhook-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + template: + metadata: + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + spec: + serviceAccountName: cert-manager-webhook + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-webhook + image: "quay.io/jetstack/cert-manager-webhook:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --secure-port=10250 + - --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) + - --dynamic-serving-ca-secret-name=cert-manager-webhook-ca + - --dynamic-serving-dns-names=cert-manager-webhook + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE) + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE).svc + + ports: + - name: https + protocol: TCP + containerPort: 10250 + - name: healthcheck + protocol: TCP + containerPort: 6080 + livenessProbe: + httpGet: + path: /livez + port: 6080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthz + port: 6080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/webhook-mutating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). 
+ matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + # Only include 'sideEffects' field in Kubernetes 1.12+ + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /mutate +--- +# Source: cert-manager/templates/webhook-validating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + namespaceSelector: + matchExpressions: + - key: "cert-manager.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - cert-manager + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). + matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /validate diff --git a/config/datacoveslocal.com/base/dashboard-v2.4.0.yaml b/config/datacoveslocal.com/base/dashboard-v2.4.0.yaml new file mode 100644 index 00000000..6d4a0bd3 --- /dev/null +++ b/config/datacoveslocal.com/base/dashboard-v2.4.0.yaml @@ -0,0 +1,305 @@ +# https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
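+
+# Illustrative usage note (YAML comments only, not part of the upstream manifest): once this
+# manifest is applied, one common way to reach the dashboard locally is to port-forward the
+# kubernetes-dashboard Service defined below and sign in with a ServiceAccount token, e.g.:
+#
+#   kubectl -n kubernetes-dashboard port-forward svc/kubernetes-dashboard 8443:443
+#   kubectl -n kubernetes-dashboard create token kubernetes-dashboard   # kubectl >= 1.24
+#
+# The dashboard is then served at https://localhost:8443 with a self-signed certificate; the
+# kubernetes-dashboard ServiceAccount defined here has minimal permissions, so a more
+# privileged account may be needed to browse cluster resources.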
+ +apiVersion: v1 +kind: Namespace +metadata: + name: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + k8s-app: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kubernetes-dashboard +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kubernetes-dashboard +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kubernetes-dashboard +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kubernetes-dashboard + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics. 
+ - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: kubernetesui/dashboard:v2.4.0 + imagePullPolicy: Always + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kubernetes-dashboard + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard + nodeSelector: + "kubernetes.io/os": linux + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: dashboard-metrics-scraper + image: kubernetesui/metrics-scraper:v1.0.7 + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + nodeSelector: + "kubernetes.io/os": linux + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + volumes: + - name: tmp-volume + emptyDir: {} diff --git a/config/datacoveslocal.com/base/domain.csr b/config/datacoveslocal.com/base/domain.csr new file mode 100644 index 00000000..ce9e1b06 --- /dev/null +++ b/config/datacoveslocal.com/base/domain.csr @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICvzCCAacCAQAwejELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAkNBMQswCQYDVQQH +DAJMQTESMBAGA1UECgwJRGF0YWNvdmVzMRswGQYDVQQDDBJkYXRhY292ZXNsb2Nh +bC5jb20xIDAeBgkqhkiG9w0BCQEWEWhleUBkYXRhY292ZXMuY29tMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAox138CdOkigSmIoagQfvui1RH4IkyZYe +4dRLLHP40D3fMnV5Vj0CLERhNamDWsWZ0IaLHWbej5VilV++8+68282K6HneKsBB +CBaYxCIVvTKIYZ9sWBKEtouQBm34mJSEdEBcmfECux6LqTNvO9U1QW3+4iA4Vp7k +y20bqbSMlhin377H3dWkShCH5aaa5rEY0Ob+y1lzd9SKqNXiIbEOUm4sNigMF4XR +FvUaVCv47Ip+u9cQTX7kgRx4Nc7j41sa228EG4Ll7z22ptBrGPRs+uuxMWdvsPiW +KQ9Hj574+fOz50DwReTVyUZ7FSa26XUZjD19q6ANUqveCKON7e2uIwIDAQABoAAw +DQYJKoZIhvcNAQELBQADggEBAC3x3ekbtTdS0T2pL/VsFuqxST6aINpmi8wzQYjR +88Ze6Jk7E9aleCNa1Rrk3O14aIU4sL+T3cWZNOceRTDRCHqOY7OOeJ+mP96PfnuQ +a9XHMDu4TfjHQD3GjRloI52d2+2osWTfJQII2fLZI59c8bX99R/vesC3aG7OJSjQ +yr/eyjvRMZt1z44xcgspnM+L7Ca+Ubf1gm1N/e24Qj1BFJOAR/k6nYRJ0DNV4Fte +a1CxlB87VH4S0sRx6EXFo7v5UJva55GcRk3et12YCe2GCvwX56bIJTB7IV5jw/Rb +MGq1gvvn/kIRhvMfJfAyW2gLo72fuCzWjF9AgwPWhffJjc8= +-----END CERTIFICATE REQUEST----- diff --git 
a/config/datacoveslocal.com/base/ingress-nginx-patch.yaml b/config/datacoveslocal.com/base/ingress-nginx-patch.yaml new file mode 100644 index 00000000..66195e11 --- /dev/null +++ b/config/datacoveslocal.com/base/ingress-nginx-patch.yaml @@ -0,0 +1,31 @@ +# Redirect http to https +apiVersion: v1 +kind: ConfigMap +metadata: + name: ingress-nginx-controller + namespace: ingress-nginx +data: + ssl-redirect: "true" +--- +# Configure TLS certificate to use with --default-ssl-certificate +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + template: + spec: + containers: + - name: controller + args: + - /nginx-ingress-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + - --publish-status-address=localhost + - --watch-ingress-without-class + - --default-ssl-certificate=ingress-nginx/default-tls diff --git a/config/datacoveslocal.com/base/ingress-nginx-v1.6.4.yaml b/config/datacoveslocal.com/base/ingress-nginx-v1.6.4.yaml new file mode 100644 index 00000000..2156c2b2 --- /dev/null +++ b/config/datacoveslocal.com/base/ingress-nginx-v1.6.4.yaml @@ -0,0 +1,659 @@ +# https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.6.4/deploy/static/provider/kind/deploy.yaml + +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + name: ingress-nginx +--- +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resourceNames: + - ingress-nginx-leader + resources: + - leases + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - apiGroups: + - "" + resources: + - events + verbs: + - 
create + - patch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission + namespace: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: 
ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: v1 +data: + allow-snippet-annotations: "true" +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller + namespace: ingress-nginx +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - appProtocol: http + name: http + port: 80 + protocol: TCP + targetPort: http + - appProtocol: https + name: https + port: 443 + protocol: TCP + targetPort: https + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + type: NodePort +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller-admission + namespace: ingress-nginx +spec: + ports: + - appProtocol: https + name: https-webhook + port: 443 + targetPort: webhook + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + minReadySeconds: 0 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + strategy: + rollingUpdate: + maxUnavailable: 1 + type: RollingUpdate + template: + metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + spec: + containers: + - args: + - /nginx-ingress-controller + - --election-id=ingress-nginx-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + - --watch-ingress-without-class=true + - --publish-status-address=localhost + env: + - name: POD_NAME + 
valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + image: registry.k8s.io/ingress-nginx/controller:v1.6.4@sha256:15be4666c53052484dd2992efacf2f50ea77a78ae8aa21ccd91af6baaa7ea22f + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: controller + ports: + - containerPort: 80 + hostPort: 80 + name: http + protocol: TCP + - containerPort: 443 + hostPort: 443 + name: https + protocol: TCP + - containerPort: 8443 + name: webhook + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 100m + memory: 90Mi + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 + volumeMounts: + - mountPath: /usr/local/certificates/ + name: webhook-cert + readOnly: true + dnsPolicy: ClusterFirst + nodeSelector: + ingress-ready: "true" + kubernetes.io/os: linux + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 0 + tolerations: + - effect: NoSchedule + key: node-role.kubernetes.io/master + operator: Equal + - effect: NoSchedule + key: node-role.kubernetes.io/control-plane + operator: Equal + volumes: + - name: webhook-cert + secret: + secretName: ingress-nginx-admission +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-create + namespace: ingress-nginx +spec: + template: + metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-create + spec: + containers: + - args: + - create + - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f + imagePullPolicy: IfNotPresent + name: create + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-patch + namespace: ingress-nginx +spec: + template: + metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + 
app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-patch + spec: + containers: + - args: + - patch + - --webhook-name=ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f + imagePullPolicy: IfNotPresent + name: patch + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission +webhooks: + - admissionReviewVersions: + - v1 + clientConfig: + service: + name: ingress-nginx-controller-admission + namespace: ingress-nginx + path: /networking/v1/ingresses + failurePolicy: Fail + matchPolicy: Equivalent + name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + sideEffects: None diff --git a/config/datacoveslocal.com/base/kind-registry.yaml b/config/datacoveslocal.com/base/kind-registry.yaml new file mode 100644 index 00000000..42049323 --- /dev/null +++ b/config/datacoveslocal.com/base/kind-registry.yaml @@ -0,0 +1,10 @@ +# To run a local docker registry. 
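+# Note: this ConfigMap does not run the registry itself; it only advertises a +# local registry to in-cluster tooling, following the kind local-registry pattern +# linked in the help URL below. It assumes a registry container is already +# reachable at localhost:5000, started for example (outside this manifest) with: +# docker run -d --restart=always -p 127.0.0.1:5000:5000 --name kind-registry registry:2 +# and attached to the "kind" docker network.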
+apiVersion: v1 +kind: ConfigMap +metadata: + name: local-registry-hosting + namespace: kube-public +data: + localRegistryHosting.v1: | + host: "localhost:5000" + help: "https://kind.sigs.k8s.io/docs/user/local-registry/" diff --git a/config/datacoveslocal.com/base/kustomization.yaml b/config/datacoveslocal.com/base/kustomization.yaml new file mode 100644 index 00000000..c1a8cf54 --- /dev/null +++ b/config/datacoveslocal.com/base/kustomization.yaml @@ -0,0 +1,26 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - metrics-server-v0.6.2.yaml + - calico-v3.25.yaml + - cert-manager-v1.11.0.yaml + - ingress-nginx-v1.6.4.yaml + # - dashboard-v2.4.0.yaml + - kind-registry.yaml + - storage_class_dummies.yaml + +patchesStrategicMerge: + - metrics-server-patch.yaml + - ingress-nginx-patch.yaml + +secretGenerator: + # TLS certificate for *.datacoveslocal.com + - name: default-tls + namespace: ingress-nginx + type: kubernetes.io/tls + files: + - tls.crt=local-cert.cer + - tls.key=local-cert.key + options: + disableNameSuffixHash: true diff --git a/config/datacoveslocal.com/base/metrics-server-patch.yaml b/config/datacoveslocal.com/base/metrics-server-patch.yaml new file mode 100644 index 00000000..544e93d3 --- /dev/null +++ b/config/datacoveslocal.com/base/metrics-server-patch.yaml @@ -0,0 +1,17 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: metrics-server + namespace: kube-system +spec: + template: + spec: + containers: + - name: metrics-server + args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + - --kubelet-insecure-tls diff --git a/config/datacoveslocal.com/base/metrics-server-v0.6.2.yaml b/config/datacoveslocal.com/base/metrics-server-v0.6.2.yaml new file mode 100644 index 00000000..908e8967 --- /dev/null +++ b/config/datacoveslocal.com/base/metrics-server-v0.6.2.yaml @@ -0,0 +1,198 @@ +# https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.2/components.yaml + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader +rules: + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +rules: + - apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get + - apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: 
ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + image: registry.k8s.io/metrics-server/metrics-server:v0.6.2 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + - containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + resources: + requests: + cpu: 100m + memory: 200Mi + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: tmp-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + volumes: + - emptyDir: {} + name: tmp-dir +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + labels: + k8s-app: metrics-server + name: v1beta1.metrics.k8s.io +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 diff --git a/config/datacoveslocal.com/base/storage_class_dummies.yaml b/config/datacoveslocal.com/base/storage_class_dummies.yaml new file mode 100644 index 00000000..af5c504e --- /dev/null +++ b/config/datacoveslocal.com/base/storage_class_dummies.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: efs + namespace: local-path-storage +provisioner: cluster.local/nfs-server-provisioner +--- +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: azurefile-csi + namespace: local-path-storage +provisioner: cluster.local/nfs-server-provisioner \ No newline at end of file diff --git a/config/datacoveslocal.com/cluster-params.yaml b/config/datacoveslocal.com/cluster-params.yaml new file mode 100644 index 00000000..eff5e433 --- /dev/null +++ b/config/datacoveslocal.com/cluster-params.yaml @@ -0,0 +1,116 @@ +domain: datacoveslocal.com +context: kind-datacoves-cluster + +provider: kind +kubernetes_version: "1.31.1" # Must match cli.py: kind_create. 
+ +## Docker image defaults +release: "3.4.202505141555" + +# docker_registry: +# docker_secret_name: +# extra_images: + +## cert-manager +# cert_manager_issuer: + +## external-dns +# external_dns_url: "" # kc get -A svc | grep LoadBalancer +postgres_db_provisioner: + host: postgres-postgresql.core.svc.cluster.local + user: postgres + pass: password + +airflow_config: + db: + external: true + logs: + external: true + backend: nfs + +superset_config: + db: + external: true + +airbyte_config: + db: + tls: false + backend: postgres + external: true + tls_enabled: false + host_verification: false + tls_disable_host_verification: true + logs: + backend: s3 + external: false + +datahub_config: + db: + external: true + backend: postgres + +## Account +account: + name: Local + slug: local + owner: + email: hey@datacoves.com + name: Datacoves Admin + is_superuser: true + setup_enabled: true + +## Projects +projects: + analytics: + name: Analytics + slug: analytics-local + clone_strategy: http_clone + repository: + url: https://github.com/datacoves/balboa.git + git_url: git@github.com:datacoves/balboa.git + settings: + dbt_profile: default + groups: + admins: + - ADMIN-TEST + developers: + - DEVELOPER-TEST + viewers: + - VIEWER-TEST + +## Features +features_enabled: + admin_users: true + admin_groups: true + admin_account: true + admin_billing: true + admin_projects: true + accounts_signup: false # change to true when testing stripe integration + admin_invitations: true + admin_environments: true + admin_connections: true + admin_integrations: true + admin_secrets: true + admin_service_credentials: true + user_profile_change_name: true + user_profile_delete_account: true + user_profile_change_credentials: true + user_profile_change_ssh_keys: true + user_profile_change_ssl_keys: true + codeserver_restart: true + observability_stack: false + env_grafana_dashboards_enabled: false + +## Local development flags +dont_use_uwsgi: true +celery_worker_autoreload: true +run_core_api_db_in_cluster: true +local_api_volume: true +local_dbt_api_volume: false +local_workbench_image: false +local_workbench_volume: false +enable_dbt_api: true +expose_dbt_api: true # Exposes the Jade API at dbt.{cluster_domain} +observability_stack: true +core_liveness_readiness: false +install_node_local_dns: true diff --git a/config/datacoveslocal.com/environments/dev123/airbyte.yaml b/config/datacoveslocal.com/environments/dev123/airbyte.yaml new file mode 100644 index 00000000..4491ca07 --- /dev/null +++ b/config/datacoveslocal.com/environments/dev123/airbyte.yaml @@ -0,0 +1,11 @@ +# Sample airbyte.secret.yaml with external db and s3 for logs. 
+# db_config: +# host: "db_host:5432" +# password: "db_pass" +# user: "db_user" +# logs_config: +# AWS_ACCESS_KEY_ID: "XXXXXXXXXXXXXXXXXXXX" +# AWS_SECRET_ACCESS_KEY: "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" +# S3_LOG_BUCKET: "s3_bucket_name" +# S3_LOG_BUCKET_REGION: "xx-xxxx-x" +# S3_MINIO_ENDPOINT: "" diff --git a/config/datacoveslocal.com/environments/dev123/airflow.yaml b/config/datacoveslocal.com/environments/dev123/airflow.yaml new file mode 100644 index 00000000..df33392e --- /dev/null +++ b/config/datacoveslocal.com/environments/dev123/airflow.yaml @@ -0,0 +1,9 @@ +dags_folder: orchestrate/dags +yaml_dags_folder: orchestrate/dags +git_branch: airflow_development + +db: + external: true +logs: + backend: nfs + external: true diff --git a/config/datacoveslocal.com/environments/dev123/environment.yaml b/config/datacoveslocal.com/environments/dev123/environment.yaml new file mode 100644 index 00000000..3d4e13f0 --- /dev/null +++ b/config/datacoveslocal.com/environments/dev123/environment.yaml @@ -0,0 +1,18 @@ +name: Development +project: analytics-local +type: dev +release: "3.4.202505141555" + +services: + airbyte: + enabled: false + airflow: + enabled: false + code-server: + enabled: true + dbt-docs: + enabled: true + superset: + enabled: false +dbt_home_path: transform +dbt_profiles_dir: automate/dbt diff --git a/config/datacoveslocal.com/kind/kind-cluster.yaml b/config/datacoveslocal.com/kind/kind-cluster.yaml new file mode 100644 index 00000000..fb7f6424 --- /dev/null +++ b/config/datacoveslocal.com/kind/kind-cluster.yaml @@ -0,0 +1,55 @@ +kind: Cluster +apiVersion: kind.x-k8s.io/v1alpha4 +name: datacoves-cluster +nodes: + - role: control-plane + kubeadmConfigPatches: + - | + kind: InitConfiguration + nodeRegistration: + kubeletExtraArgs: + node-labels: "ingress-ready=true" + extraPortMappings: + - containerPort: 80 + hostPort: 80 + protocol: TCP + - containerPort: 443 + hostPort: 443 + protocol: TCP + extraMounts: + - hostPath: src/core/api/app + containerPath: /mnt/core-api + - hostPath: src/core/dbt-api + containerPath: /mnt/core-dbt-api + - hostPath: src/core/workbench/app + containerPath: /mnt/core-workbench + - hostPath: src/test-app + containerPath: /mnt/test-app + - role: worker + labels: + k8s.datacoves.com/nodegroup-kind: general + k8s.datacoves.com/workers: enabled + extraMounts: + - hostPath: src/core/api/app + containerPath: /mnt/core-api + - hostPath: src/core/dbt-api + containerPath: /mnt/core-dbt-api + - hostPath: src/core/workbench/app + containerPath: /mnt/core-workbench + - hostPath: src/test-app + containerPath: /mnt/test-app + - role: worker + labels: + k8s.datacoves.com/nodegroup-kind: volumed + extraMounts: + - hostPath: src/core/api/app + containerPath: /mnt/core-api + - hostPath: src/core/dbt-api + containerPath: /mnt/core-dbt-api + - hostPath: src/core/workbench/app + containerPath: /mnt/core-workbench + - hostPath: src/test-app + containerPath: /mnt/test-app +networking: + disableDefaultCNI: true + podSubnet: 10.201.0.0/16 diff --git a/config/datacoveslocal.com/pricing.yaml b/config/datacoveslocal.com/pricing.yaml new file mode 100644 index 00000000..17ca8fc4 --- /dev/null +++ b/config/datacoveslocal.com/pricing.yaml @@ -0,0 +1,1013 @@ +plans: + growth-monthly: + billing_period: monthly + environment_quotas: {} + kind: growth + trial_period_days: 0 + variants: + - pro: + default: false + items: + - price: + billing_scheme: per_unit + created: 1700047625 + currency: usd + custom_unit_amount: null + id: price_1OChFNLF8qmfSSrQ5D0Ajvue + livemode: false + 
lookup_key: null + metadata: {} + nickname: pro + product: prod_MosqqsXODF6CFh + recurring: + aggregate_usage: null + interval: month + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10290 + unit_amount_decimal: '10290' + - price: + billing_scheme: per_unit + created: 1700144313 + currency: usd + custom_unit_amount: null + id: price_1OD6OrLF8qmfSSrQJx667MjN + livemode: false + lookup_key: null + metadata: {} + nickname: pro + product: prod_MosmZqbSgP65Iu + recurring: + aggregate_usage: null + interval: month + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 5145 + unit_amount_decimal: '5145' + - price: + billing_scheme: per_unit + created: 1700139816 + currency: usd + custom_unit_amount: null + id: price_1OD5EKLF8qmfSSrQjArvss7Y + livemode: false + lookup_key: null + metadata: {} + nickname: pro + product: prod_MosnGOjcfCBKmh + recurring: + aggregate_usage: sum + interval: month + interval_count: 1 + trial_period_days: null + usage_type: metered + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10 + unit_amount_decimal: '10' + - price: + billing_scheme: per_unit + created: 1700138520 + currency: usd + custom_unit_amount: null + id: price_1OD4tQLF8qmfSSrQdlMwESp6 + livemode: false + lookup_key: null + metadata: {} + nickname: pro + product: prod_MosonUtcAxf4hs + recurring: + aggregate_usage: null + interval: month + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 5145 + unit_amount_decimal: '5145' + - price: + billing_scheme: per_unit + created: 1700142319 + currency: usd + custom_unit_amount: null + id: price_1OD5shLF8qmfSSrQe45HZlHP + livemode: false + lookup_key: null + metadata: {} + nickname: pro + product: prod_MosppM3RQpT7a8 + recurring: + aggregate_usage: sum + interval: month + interval_count: 1 + trial_period_days: null + usage_type: metered + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10 + unit_amount_decimal: '10' + - price: + billing_scheme: per_unit + created: 1700142367 + currency: usd + custom_unit_amount: null + id: price_1OD5tTLF8qmfSSrQcj3iRKLu + livemode: false + lookup_key: null + metadata: {} + nickname: pro + product: prod_MosrxOn4j8Xra7 + recurring: + aggregate_usage: null + interval: month + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 5145 + unit_amount_decimal: '5145' + - standard: + default: true + items: + - price: + billing_scheme: per_unit + created: 1696442186 + currency: usd + custom_unit_amount: null + id: price_1NxZJ8LF8qmfSSrQgfUna6jl + livemode: false + lookup_key: null + metadata: {} + nickname: standard + product: prod_MosqqsXODF6CFh + recurring: + aggregate_usage: null + interval: month + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10000 + unit_amount_decimal: '10000' + - price: + billing_scheme: per_unit + created: 1696442188 + currency: usd + custom_unit_amount: null + id: price_1NxZJALF8qmfSSrQG1SJxnxc + livemode: false 
+ lookup_key: null + metadata: {} + nickname: null + product: prod_MosmZqbSgP65Iu + recurring: + aggregate_usage: null + interval: month + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 5000 + unit_amount_decimal: '5000' + - price: + billing_scheme: per_unit + created: 1696442188 + currency: usd + custom_unit_amount: null + id: price_1NxZJALF8qmfSSrQaE5MUrll + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_MosnGOjcfCBKmh + recurring: + aggregate_usage: sum + interval: month + interval_count: 1 + trial_period_days: null + usage_type: metered + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10 + unit_amount_decimal: '10' + - price: + billing_scheme: per_unit + created: 1696442187 + currency: usd + custom_unit_amount: null + id: price_1NxZJ9LF8qmfSSrQI1zohbQM + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_MosonUtcAxf4hs + recurring: + aggregate_usage: null + interval: month + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 5000 + unit_amount_decimal: '5000' + - price: + billing_scheme: per_unit + created: 1696442187 + currency: usd + custom_unit_amount: null + id: price_1NxZJ9LF8qmfSSrQuWm58nfd + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_MosppM3RQpT7a8 + recurring: + aggregate_usage: sum + interval: month + interval_count: 1 + trial_period_days: null + usage_type: metered + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10 + unit_amount_decimal: '10' + - price: + billing_scheme: per_unit + created: 1696442186 + currency: usd + custom_unit_amount: null + id: price_1NxZJ8LF8qmfSSrQOjThHfaa + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_MosrxOn4j8Xra7 + recurring: + aggregate_usage: null + interval: month + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 5000 + unit_amount_decimal: '5000' + growth-yearly: + billing_period: yearly + environment_quotas: {} + kind: growth + trial_period_days: 0 + variants: + - standard: + default: true + items: + - price: + billing_scheme: per_unit + created: 1696442189 + currency: usd + custom_unit_amount: null + id: price_1NxZJBLF8qmfSSrQmjrgXNe7 + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_Mosjy8LcGTxq4Z + recurring: + aggregate_usage: null + interval: year + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 108000 + unit_amount_decimal: '108000' + - price: + billing_scheme: per_unit + created: 1696442191 + currency: usd + custom_unit_amount: null + id: price_1NxZJDLF8qmfSSrQFtdjXyo7 + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_MoseSYnsAWxnKa + recurring: + aggregate_usage: null + interval: year + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 54000 + unit_amount_decimal: '54000' + - price: + billing_scheme: 
per_unit + created: 1696442191 + currency: usd + custom_unit_amount: null + id: price_1NxZJDLF8qmfSSrQ9jVONmEO + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_Mosfb6tck44VQv + recurring: + aggregate_usage: null + interval: year + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 54000 + unit_amount_decimal: '54000' + - price: + billing_scheme: per_unit + created: 1696442190 + currency: usd + custom_unit_amount: null + id: price_1NxZJCLF8qmfSSrQlMBLcz6g + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_MosgBelcMEr4mZ + recurring: + aggregate_usage: null + interval: year + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 54000 + unit_amount_decimal: '54000' + - price: + billing_scheme: per_unit + created: 1696442188 + currency: usd + custom_unit_amount: null + id: price_1NxZJALF8qmfSSrQaE5MUrll + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_MosnGOjcfCBKmh + recurring: + aggregate_usage: sum + interval: month + interval_count: 1 + trial_period_days: null + usage_type: metered + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10 + unit_amount_decimal: '10' + - price: + billing_scheme: per_unit + created: 1696442187 + currency: usd + custom_unit_amount: null + id: price_1NxZJ9LF8qmfSSrQuWm58nfd + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_MosppM3RQpT7a8 + recurring: + aggregate_usage: sum + interval: month + interval_count: 1 + trial_period_days: null + usage_type: metered + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10 + unit_amount_decimal: '10' + - pro: + default: false + items: + - price: + billing_scheme: per_unit + created: 1700139816 + currency: usd + custom_unit_amount: null + id: price_1OD5EKLF8qmfSSrQjArvss7Y + livemode: false + lookup_key: null + metadata: {} + nickname: pro + product: prod_MosnGOjcfCBKmh + recurring: + aggregate_usage: sum + interval: month + interval_count: 1 + trial_period_days: null + usage_type: metered + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10 + unit_amount_decimal: '10' + - price: + billing_scheme: per_unit + created: 1700142319 + currency: usd + custom_unit_amount: null + id: price_1OD5shLF8qmfSSrQe45HZlHP + livemode: false + lookup_key: null + metadata: {} + nickname: pro + product: prod_MosppM3RQpT7a8 + recurring: + aggregate_usage: sum + interval: month + interval_count: 1 + trial_period_days: null + usage_type: metered + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10 + unit_amount_decimal: '10' + starter-monthly: + billing_period: monthly + environment_quotas: {} + kind: starter + trial_period_days: 14 + variants: + - standard: + default: true + items: + - price: + billing_scheme: per_unit + created: 1696442188 + currency: usd + custom_unit_amount: null + id: price_1NxZJALF8qmfSSrQek4OwpvW + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_Mosl5w75zGxPKg + recurring: + aggregate_usage: null + interval: month + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: 
exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 19900 + unit_amount_decimal: '19900' + - price: + billing_scheme: per_unit + created: 1696442190 + currency: usd + custom_unit_amount: null + id: price_1NxZJCLF8qmfSSrQZ8y4KGXC + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_MosiioJvGGbxWy + recurring: + aggregate_usage: null + interval: month + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 10000 + unit_amount_decimal: '10000' + starter-yearly: + billing_period: yearly + environment_quotas: {} + kind: starter + trial_period_days: 14 + variants: + - standard: + default: true + items: + - price: + billing_scheme: per_unit + created: 1696442189 + currency: usd + custom_unit_amount: null + id: price_1NxZJBLF8qmfSSrQxFATwcTO + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_MoskxrmMsoDbvb + recurring: + aggregate_usage: null + interval: year + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 214920 + unit_amount_decimal: '214920' + - price: + billing_scheme: per_unit + created: 1696442190 + currency: usd + custom_unit_amount: null + id: price_1NxZJCLF8qmfSSrQYq2gbf86 + livemode: false + lookup_key: null + metadata: {} + nickname: null + product: prod_Mosh3H37eTTsQV + recurring: + aggregate_usage: null + interval: year + interval_count: 1 + trial_period_days: null + usage_type: licensed + tax_behavior: exclusive + tiers_mode: null + transform_quantity: null + type: recurring + unit_amount: 108000 + unit_amount_decimal: '108000' +products: + prod_MoseSYnsAWxnKa: + charges_per_seat: false + description: '' + id: prod_MoseSYnsAWxnKa + name: Airbyte - server/yr + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442186 + default_price: null + description: null + features: [] + id: prod_MoseSYnsAWxnKa + images: [] + livemode: false + metadata: + plans: growth-yearly + name: Airbyte - server/yr + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: server + updated: 1696844455 + url: null + tally_name: '' + prod_Mosfb6tck44VQv: + charges_per_seat: false + description: '' + id: prod_Mosfb6tck44VQv + name: Airflow - server/yr + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442185 + default_price: null + description: null + features: [] + id: prod_Mosfb6tck44VQv + images: [] + livemode: false + metadata: + plans: growth-yearly + name: Airflow - server/yr + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: server + updated: 1696844455 + url: null + tally_name: '' + prod_MosgBelcMEr4mZ: + charges_per_seat: false + description: '' + id: prod_MosgBelcMEr4mZ + name: Superset - server/yr + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442185 + default_price: null + description: null + features: [] + id: prod_MosgBelcMEr4mZ + images: [] + livemode: false + metadata: + plans: growth-yearly + name: Superset - server/yr + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: server + updated: 
1696844455 + url: null + tally_name: '' + prod_Mosh3H37eTTsQV: + charges_per_seat: false + description: includes 1 Project + 1 Environment including 1 Airbyte Server + 10 + compute hours, 1 Airflow Server + 10 compute hours, 1 Superset Server + 10 viewer + licenses + id: prod_Mosh3H37eTTsQV + name: Starter Pack - bundle/yr + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442184 + default_price: null + description: includes 1 Project + 1 Environment including 1 Airbyte Server + + 10 compute hours, 1 Airflow Server + 10 compute hours, 1 Superset Server + + 10 viewer licenses + features: [] + id: prod_Mosh3H37eTTsQV + images: [] + livemode: false + metadata: + plans: starter-yearly + name: Starter Pack - bundle/yr + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: bundle + updated: 1696844455 + url: null + tally_name: '' + prod_MosiioJvGGbxWy: + charges_per_seat: false + description: includes 1 Project + 1 Environment including 1 Airbyte Server + 10 + compute hours, 1 Airflow Server + 10 compute hours, 1 Superset Server + 10 viewer + licenses + id: prod_MosiioJvGGbxWy + name: Starter Pack - bundle/mo + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442184 + default_price: null + description: includes 1 Project + 1 Environment including 1 Airbyte Server + + 10 compute hours, 1 Airflow Server + 10 compute hours, 1 Superset Server + + 10 viewer licenses + features: [] + id: prod_MosiioJvGGbxWy + images: [] + livemode: false + metadata: + plans: starter-monthly + name: Starter Pack - bundle/mo + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: bundle + updated: 1696844455 + url: null + tally_name: '' + prod_Mosjy8LcGTxq4Z: + charges_per_seat: true + description: '' + id: prod_Mosjy8LcGTxq4Z + name: Growth Plan - user/yr + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442184 + default_price: null + description: null + features: [] + id: prod_Mosjy8LcGTxq4Z + images: [] + livemode: false + metadata: + plans: growth-yearly + name: Growth Plan - user/yr + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: seat + updated: 1696844455 + url: null + tally_name: '' + prod_MoskxrmMsoDbvb: + charges_per_seat: true + description: '' + id: prod_MoskxrmMsoDbvb + name: Starter Plan - user/yr + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442183 + default_price: null + description: null + features: [] + id: prod_MoskxrmMsoDbvb + images: [] + livemode: false + metadata: + plans: starter-yearly + name: Starter Plan - user/yr + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: seat + updated: 1696844455 + url: null + tally_name: '' + prod_Mosl5w75zGxPKg: + charges_per_seat: true + description: '' + id: prod_Mosl5w75zGxPKg + name: Starter Plan - user/mo + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442183 + default_price: null + description: null + features: [] + id: prod_Mosl5w75zGxPKg + images: [] + livemode: false + metadata: + plans: starter-monthly + name: Starter Plan - user/mo + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + 
tax_code: txcd_10103001 + type: service + unit_label: seat + updated: 1696844455 + url: null + tally_name: '' + prod_MosmZqbSgP65Iu: + charges_per_seat: false + description: '' + id: prod_MosmZqbSgP65Iu + name: Superset - server/mo + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442182 + default_price: null + description: null + features: [] + id: prod_MosmZqbSgP65Iu + images: [] + livemode: false + metadata: + plans: growth-monthly + name: Superset - server/mo + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: server + updated: 1700144314 + url: null + tally_name: '' + prod_MosnGOjcfCBKmh: + charges_per_seat: false + description: '' + id: prod_MosnGOjcfCBKmh + name: Airflow compute + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442182 + default_price: null + description: null + features: [] + id: prod_MosnGOjcfCBKmh + images: [] + livemode: false + metadata: + plans: growth-yearly,growth-monthly + name: Airflow compute + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: minute + updated: 1700139843 + url: null + tally_name: '' + prod_MosonUtcAxf4hs: + charges_per_seat: false + description: '' + id: prod_MosonUtcAxf4hs + name: Airflow - server/mo + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442182 + default_price: null + description: null + features: [] + id: prod_MosonUtcAxf4hs + images: [] + livemode: false + metadata: + plans: growth-monthly + name: Airflow - server/mo + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: server + updated: 1700138520 + url: null + tally_name: '' + prod_MosppM3RQpT7a8: + charges_per_seat: false + description: Airbyte workers compute + id: prod_MosppM3RQpT7a8 + name: Airbyte compute + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442181 + default_price: null + description: Airbyte workers compute + features: [] + id: prod_MosppM3RQpT7a8 + images: [] + livemode: false + metadata: + plans: growth-yearly,growth-monthly + name: Airbyte compute + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: minute + updated: 1700142320 + url: null + tally_name: '' + prod_MosqqsXODF6CFh: + charges_per_seat: true + description: Per Seat. Unlimited projects and environments. Airbyte, Airflow and + Superset services charged separately. + id: prod_MosqqsXODF6CFh + name: Growth Plan - user/mo + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442181 + default_price: null + description: Per Seat. Unlimited projects and environments. Airbyte, Airflow + and Superset services charged separately. 
+ features: [] + id: prod_MosqqsXODF6CFh + images: [] + livemode: false + metadata: + plans: growth-monthly + name: Growth Plan - user/mo + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: seat + updated: 1700050681 + url: null + tally_name: '' + prod_MosrxOn4j8Xra7: + charges_per_seat: false + description: '' + id: prod_MosrxOn4j8Xra7 + name: Airbyte - server/mo + service_name: '' + stripe_data: + active: true + attributes: [] + created: 1696442180 + default_price: null + description: null + features: [] + id: prod_MosrxOn4j8Xra7 + images: [] + livemode: false + metadata: + plans: growth-monthly + name: Airbyte - server/mo + object: product + package_dimensions: null + shippable: null + statement_descriptor: null + tax_code: txcd_10103001 + type: service + unit_label: server + updated: 1700142368 + url: null + tally_name: '' + diff --git a/docs/.index.html.swp b/docs/.index.html.swp new file mode 100644 index 00000000..44448280 Binary files /dev/null and b/docs/.index.html.swp differ diff --git a/docs/Dockerfile b/docs/Dockerfile new file mode 100644 index 00000000..1cf4fba1 --- /dev/null +++ b/docs/Dockerfile @@ -0,0 +1,23 @@ +FROM nginx:latest + +# This lives exclusively on our VM, and if you have access to this file in this +# repo, then you already have access to the documentation, so I don't see a +# point in hiding the password. +# +# We will bake a self-signed cert into here and rely on CloudFlare to +# translate it into a 'real' cert. +RUN apt-get update && apt-get install -y apache2-utils && \ + htpasswd -c -b /etc/nginx/htpasswd hey d@t@c0v3D3V3L0P3R && \ + openssl req -x509 -nodes -newkey rsa:4096 \ + -keyout "/etc/nginx/privkey.pem" \ + -out "/etc/nginx/fullchain.pem" \ + -days 3650 \ + -subj "/C=US/ST=NA/L=Datacoves/O=Datacoves/OU=Org/CN=devdocs.datacoves.com" + + +# Also copy our generated docs output and nginx config into the image. + +COPY docs_output /usr/share/nginx/html +COPY default.conf /etc/nginx/conf.d/default.conf + +EXPOSE 8080 diff --git a/docs/assets/search-index.js b/docs/assets/search-index.js new file mode 100644 index 00000000..7180c89d --- /dev/null +++ b/docs/assets/search-index.js @@ -0,0 +1 @@ 
+{"documentCount":70,"nextId":70,"documentIds":{"0":"docs_output/client-docs/ccs/cluster-requirements-azure.html","1":"docs_output/client-docs/index.html","2":"docs_output/client-docs/jnj/1-cluster-requirements.html","3":"docs_output/client-docs/jnj/2-configuration.html","4":"docs_output/client-docs/jnj/3-configure-bastion-ec2-instance.html","5":"docs_output/client-docs/jnj/4-configure-bitbucket-and-jenkins.html","6":"docs_output/client-docs/jnj/5-deployment.html","7":"docs_output/client-docs/jnj/6-access.html","8":"docs_output/client-docs/jnj/7-configure-sa-docker-in-kubernates.html","9":"docs_output/client-docs/jnj/8-summary-requirements-new-cluster.html","10":"docs_output/client-docs/jnj/index.html","11":"docs_output/client-docs/kenvue/how-to-setup-helm-chart.html","12":"docs_output/client-docs/orrum/index.html","13":"docs_output/dev-logs/2021-06-create-er-diagram.html","14":"docs_output/dev-logs/2021-09-eks-setup.html","15":"docs_output/dev-logs/2021-12-jnj-ensembledev-deployment.html","16":"docs_output/dev-logs/2022-04-jnj-artemisdev-configuration.html","17":"docs_output/dev-logs/2022-04-jnj-ensembletest-deployment.html","18":"docs_output/dev-logs/2022-05-setup-aks-postgres-flexible-server.html","19":"docs_output/dev-logs/index.html","20":"docs_output/how-tos/administrate-east-us-a-aks-cluster.html","21":"docs_output/how-tos/airflow-configuration.html","22":"docs_output/how-tos/billing-system.html","23":"docs_output/how-tos/celery-monitoring.html","24":"docs_output/how-tos/choose-ec2-nodes.html","25":"docs_output/how-tos/codeserver-images.html","26":"docs_output/how-tos/connect-to-kenvue-cluster-using-a-bastion.html","27":"docs_output/how-tos/custom-dns.html","28":"docs_output/how-tos/datacoves-versioning.html","29":"docs_output/how-tos/debug-airflow-workers.html","30":"docs_output/how-tos/debug-dbt-errors-when-return-code-is-not-zero.html","31":"docs_output/how-tos/grafana-grant-permisions.html","32":"docs_output/how-tos/grafana-loki-storage-config-providers.html","33":"docs_output/how-tos/grafana-loki-storage-config.html","34":"docs_output/how-tos/hotfix.html","35":"docs_output/how-tos/how-to-create-a-ssl-certificate.html","36":"docs_output/how-tos/index.html","37":"docs_output/how-tos/install-python-reqs-on-jnj-bastion.html","38":"docs_output/how-tos/list-code-server-pods-processes.html","39":"docs_output/how-tos/make-and-install-a-release.html","40":"docs_output/how-tos/manage-profiles-and-image-sets.html","41":"docs_output/how-tos/move-a-gpg-secret-key.html","42":"docs_output/how-tos/onboard-a-new-project-on-datacoves.html","43":"docs_output/how-tos/prometheus-queries.html","44":"docs_output/how-tos/q-and-a.html","45":"docs_output/how-tos/recover-disk-on-aks.html","46":"docs_output/how-tos/register-github-self-hosted-runner.html","47":"docs_output/how-tos/release-notes.html","48":"docs_output/how-tos/request-access-to-a-cloud-pc-on-kenvue.html","49":"docs_output/how-tos/reset-datahub.html","50":"docs_output/how-tos/security-vulnerabilities-fix.html","51":"docs_output/how-tos/set-maintenance-mode.html","52":"docs_output/how-tos/setup-oauth-on-azure.html","53":"docs_output/how-tos/setup-s3-for-dbt-api.html","54":"docs_output/how-tos/testing-alerts.html","55":"docs_output/how-tos/trigger-cloudx-pipeline-on-kenvue-cluster.html","56":"docs_output/how-tos/update-kubernetes-and-datacoves.html","57":"docs_output/how-tos/update-ssl-certificates.html","58":"docs_output/how-tos/upgrade-dbt-or-related-tools.html","59":"docs_output/how-tos/work-on-a-pre-release-locally.html","60":"docs_output/
implementation/index.html","61":"docs_output/implementation/operator.html","62":"docs_output/index.html","63":"docs_output/issues-resolutions/airflow-corrupted-dag-logs.html","64":"docs_output/issues-resolutions/dbt-core-debugging.html","65":"docs_output/issues-resolutions/docker-image-debugging.html","66":"docs_output/issues-resolutions/docker-push-stopped-working.html","67":"docs_output/issues-resolutions/helm-chart.html","68":"docs_output/issues-resolutions/pomerium-not-allowing-access.html","69":"index.template.html"},"fieldIds":{"text":0},"fieldLength":{"0":[153],"1":[90],"2":[608],"3":[254],"4":[229],"5":[141],"6":[131],"7":[178],"8":[126],"9":[138],"10":[59],"11":[77],"12":[172],"13":[48],"14":[143],"15":[279],"16":[82],"17":[120],"18":[119],"19":[3],"20":[105],"21":[55],"22":[380],"23":[143],"24":[103],"25":[175],"26":[129],"27":[95],"28":[142],"29":[127],"30":[83],"31":[25],"32":[137],"33":[83],"34":[201],"35":[72],"36":[3],"37":[39],"38":[41],"39":[141],"40":[169],"41":[74],"42":[118],"43":[80],"44":[98],"45":[192],"46":[162],"47":[164],"48":[18],"49":[134],"50":[33],"51":[34],"52":[144],"53":[84],"54":[95],"55":[40],"56":[700],"57":[558],"58":[55],"59":[95],"60":[3],"61":[416],"62":[127],"63":[170],"64":[83],"65":[71],"66":[58],"67":[96],"68":[155],"69":[4]},"averageFieldLength":[137.98571428571435],"storedFields":{"0":{"url":"/docs_output/client-docs/ccs/cluster-requirements-azure.html","snip":"Edit on github\n \n \n \n \n \n \n Summary for the requirements of a new Cluster.\n \n \n \n \n Database (Azure Database for PostgreSQL - Flexible Server)\n \n \n \n \n Minimum requirements\n \n \n \n Version: 14 or later\n \n \n Workload Type: Production\n \n \n Compute+Storage: General Purpose, D4ds_v5\n \n \n Geo-Redundancy and High Availability optional but recommended.\n \n \n Admin user/password required and must be provided to Datacoves.\n \n \n Storage Type: Premium SSD\n \n \n Storage Size: 128 GiB\n \n \n Performance Tier: P10\n \n \n Storage auto growth enabled optional but recommended.\n \n \n \n \n \n Kubernetes Services\n \n \n \n \n Configuration\n \n \n \n Kubernetes version: 1.30.6 or later\n \n \n \n \n \n Node pools\n \n \n \n general\n \n \n volumed\n \n \n workers - Standard_D4s_v3 node, 128 gig OS disk size\n \n \n \n \n \n Worker groups\n \n \n \n General\n \n \n Volumed\n \n \n Workers\n \n \n \n \n \n General\n \n \n \n Standard_D4s_v3\n \n \n min_nodes: 1\n \n \n max_nodes: 4\n \n \n root_volume_size: 128\n \n \n labels:\n \n \n labels:\n ...\n - key: k8s.datacoves.com/nodegroup-kind\n value: general\n\n \n \n \n Volumed\n \n \n \n Standard_D16s_v5\n \n \n min_nodes: 1\n \n \n max_nodes: 4\n \n \n root_volume_size: 512\n \n \n labels:\n \n \n labels:\n ...\n - key: k8s.datacoves.com/nodegroup-kind\n value: volumed\n\n \n \n \n Workers\n \n \n \n min_nodes: 1\n \n \n max_nodes: 4\n \n \n root_volume_size: 128\n \n \n labels:\n \n \n labels:\n ...\n - key: k8s.datacoves.com/workers\n value: enabled\n\n \n \n \n Other configuration.\n \n \n \n \n SSL Certificate\n \n \n We recommend using a wildcard certificate, however we can also use cert manager for free certificates if that is the preference.\n \n \n Certificates must be issued for:\n \n \n \n \n *.domain.com\n \n \n \n \n domain.com\n \n \n \n \n Where 'domain.com' is whatever base domain you wish to use. We recommend using \"datacoves.YOUR_DOMAIN.YOUR_TLD\", such as 'datacoves.mycompany.com'. 
In such a case, you would need certificates for:\n \n \n \n \n *.datacoves.mycompany.com\n \n \n \n \n datacoves.mycompany.com\n \n \n \n \n \n \n DNS Configuration\n \n \n Either DNS must be configured to support the same wildcard and base domain, or the cluster must be allowed to create DNS entries via kubernetes' external-dns annotation."},"1":{"url":"/docs_output/client-docs/","snip":"Edit on github\n \n \n \n \n \n \n Customer clusters\n \n \n \n \n jnj\n \n \n \n Artemis\n \n \n Artemis dev\n \n \n Ensemble\n \n \n Ensemble test\n \n \n RND\n \n \n \n \n \n Requirements:\n \n \n \n Access to jnj workspace (worspace is provided by jnj and is personal)\n \n \n Request access: Onboarding (Noel)\n \n \n Check access: https://jnjitod.service-now.com/myworkspaces\n \n \n \n \n Repo with all configurations https://sourcecode.jnj.com/projects/asx-ahrx/repos/datacoves_deployment/browse (READ.me with all configurations). There will be a specific repo for each clusters (onboarding Noel).\n \n \n Request access: https://confluence.jnj.com/pages/viewpage.action?spaceKey=AHRX&title=How+to+request+access+to+Bitbucket+-+How+to+request+access+-+How+to+guides\n \n \n \n \n Access to Bastion\n \n \n \n \n \n kenvue\n \n \n \n Chap dev\n \n \n Chap production\n \n \n \n \n \n Requirements:\n \n \n \n Access to kenvue microsoft remote desktop (provided by jnj and is personal)\n \n \n Request access: Onboarding (Noel)\n \n \n Check access: https://kenvue.sharepoint.com/\n \n \n \n \n Repo is the same as jnj\n \n \n Access to Bastion\n \n \n \n \n \n orrum\n \n \n \n old\n \n \n new\n \n \n \n \n \n Requirements:\n \n \n \n Download VPN fron Azure (see client-docs instructions)\n \n \n Credentials in 1 password"},"2":{"url":"/docs_output/client-docs/jnj/1-cluster-requirements.html","snip":"Edit on github\n \n \n \n \n \n \n Datacoves cluster requirements\n \n \n \n Summary for the requirements of a new Cluster.\n \n \n \n \n \n EKS cluster\n \n \n The clusters are created through CloudX pipelines, from\n \n cluster.yaml\n \n files (\n \n docs\n \n ).\nFor every cluster there's a git repository with the cluster definition. If your\nteam create one of this repositories, please either grant access to datacoves staff so\nwe can make changes if required or ask us to check your\n \n cluster.yaml\n \n .\n \n \n An example repository of this kind is\n \n itx-ank/ensemble\n \n .\n \n \n Important configuration to take into consideration:\n \n \n \n Kubernetes version: latest confirmed working version. 
This is either -1 or -2 releases from current based on the time of year.\n \n \n Addons versions\n \n \n Worker groups: general, volumed, and workers.\n \n \n \n \n \n Cluster configuration files\n \n \n \n \n \n Cluster\n \n \n Repository\n \n \n Branch\n \n \n \n \n \n \n Ensemble test\n \n \n https://sourcecode.jnj.com/projects/ITX-ANK/repos/ensemble/browse/_scm_cluster\n \n \n test\n \n \n \n \n Ensemble\n \n \n https://sourcecode.jnj.com/projects/ITX-CCC/repos/ensemble/browse/_scm_cluster\n \n \n production\n \n \n \n \n R&D\n \n \n https://sourcecode.jnj.com/projects/ITX-BHE/repos/integrationscluster/browse/_scm_cluster\n \n \n test\n \n \n \n \n Artemis Dev\n \n \n https://sourcecode.jnj.com/projects/ITX-ADW/repos/artemiseks/browse/_scm_cluster\n \n \n development\n \n \n \n \n Artemis\n \n \n https://sourcecode.jnj.com/projects/ITX-ADW/repos/artemiseks/browse/_scm_cluster\n \n \n production\n \n \n \n \n Chap development\n \n \n https://sourcecode.jnj.com/projects/ITX-WCR/repos/datacove/browse/_scm_cluster\n \n \n development\n \n \n \n \n Chap production\n \n \n https://sourcecode.jnj.com/projects/ITX-WCR/repos/datacove/browse/_scm_cluster\n \n \n production\n \n \n \n \n \n Once the cluster was provisioned, you'll receive an e-mail containing the details to configure\n \n kubectl\n \n . Please forward to the datacoves team.\n \n \n The installer will need kubectl access to the cluster\n \n docs\n \n .\n \n \n \n \n Opt out from EFS CSI driver\n \n \n The EFS CSI driver installed by cloudx is usually outdated (v1.0.0) so we need to opt out from the cloudx managed service.\n \n \n To opt out from EFS CSI managed driver, create a pull request on this repo, similar to this\n \n one\n \n .\n \n \n \n \n External DNS\n \n \n In the cluster.yaml configuration there is a key\n \n external_dns\n \n . 
This key deploys the service\n \n External DNS\n \n to the cluster, managed by CloudX.\nThis service might not be available in some clusters yet, so a manual configuration might be needed on Route53 or any other DNS service, typically a CNAME record pointing to the cluster's load balancer hostname.\n \n \n \n \n Getting load balancer's hostname\n \n kubectl -n ingress-nginx get svc ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].hostname}'\n\n \n \n \n SSL Certificates Manager\n \n \n CloudX will install\n \n Cert Manager\n \n if the cluster supports it.\n \n \n If Cert Manager is not installed, 2 SSL certificates need to be issued manually:\n- wildcard certificate: *.[SUBDOMAIN].[DOMAIN]\n- root certificate: [SUBDOMAIN].[DOMAIN]\n \n \n A certificate chain file and a Private key are required for each certificate, please send the 4 files to Datacoves staff.\n \n \n \n \n Git repositories\n \n \n \n \n Config repo\n \n \n Each datacoves installation requires a configuration repo where Datacoves staff will store configuration details.\n \n \n Please create one repo per kubernetes cluster and grant access to Datacoves staff.\n \n \n \n \n Dbt analytics repo\n \n \n This is the repo where your analytics (dbt) project resides, along with airflow dags, db security roles, documentation, etc.\n \n \n \n \n Git Service Account\n \n \n Please create a Service Account with read access to the analytics repo, since that service account will be configured on services like Airflow and dbt-docs to read files from the repo.\n \n \n To do so, submit a PR to have Cloudx stop managing the currently installed driver here: https://sourcecode.jnj.com/projects/ITX-AED/repos/cloudx_container_pipelines_configs/browse/argocd/config.yaml#19\n \n \n This account will be also used by Jenkins to download images from artifactory (taqy-docker namespace), so please request access to\n \n taqy-docker\n \n on that account via AppDevTools.\n \n \n \n \n Database\n \n \n Some services require Postgres databases, as described below. These databases can share an RDS instance or aurora cluster. 
You will need to create this database cluster/instance and ensure it can be accessed from the EKS cluster.\n \n \n \n \n Minimum requirements\n \n \n \n Engine: Postgres\n \n \n Version: 14.9\n \n \n Multi-AZ: \"Single DB Instance\" for sandbox clusters, \"Multi-AZ DB Cluster\" if not.\n \n \n Master user: postgres\n \n \n Master password:\n \n \n \n \n Instance class: db.r5.large\n \n \n Storage type: Aurora Standard or gp2\n \n \n Allocated_storage: 100GB\n \n \n Enable storage autoscaling\n \n \n Maximum storage threshold: 1TB\n \n \n Authentication: password\n \n \n \n Keep in mind that JNJ cycles the master password every 24 hours so you need to run any setup command using this password before that happens.\n \n \n \n \n Initial database and user\n \n \n You'll need to create a master Postgres user and the datacoves database:\n \n CREATE USER datacoves PASSWORD insert_generated_random_password_without_special_characters;\nALTER USER datacoves CREATEDB CREATEROLE;\nGRANT datacoves TO postgres;\nCREATE DATABASE datacoves OWNER datacoves;\nREVOKE connect ON DATABASE datacoves FROM PUBLIC;\nGRANT connect ON DATABASE datacoves TO datacoves;\nGRANT connect ON DATABASE datacoves TO postgres;\n\n \n A way to generate passwords:\n \n python -c 'import secrets; print(secrets.token_urlsafe())'\n \n .\nAvoid special characters, they cause issues with some services, such as airflow.\n \n \n Please share this password with the Datacoves team.\n \n \n \n \n Active Directory groups\n \n \n Roles/groups required for datacoves users:\n \n JNJ-APP-{division}-DATACOVES-ADMIN\nJNJ-APP-{division}-DATACOVES-DEVELOPER\nJNJ-APP-{division}-DATACOVES-VIEWER\nJNJ-APP-{division}-DATACOVES-KTLO\n\n \n Substitute your\n \n {division}\n \n , e.g.\n \n PCE\n \n ,\n \n HMD\n \n ,\n \n CHAP\n \n , etc.\n \n \n \n \n Ping identity account\n \n \n Submit a ticket to\n \n Web Single Sign-On - SAML Federation\n \n to create a ping account.\n \n \n \n \n IRIS Request\n \n \n \n \n Short Description\n \n \n This is a request to enable SSO for\n \n cluster.\n \n \n \n \n \n Description\n \n \n Need to add PingID to application.\n \n \n \n \n Groups\n \n \n Need groups only filtered to ones that have the following pattern JNJ-APP-\n \n -DATACOVES-*\n \n \n \n \n \n Type\n \n \n Choose: OAuth/OpenID Connect\n \n \n \n \n Client id\n \n \n It should be any name for your cluster (e.g.\n \n chapsbx\n \n ,\n \n emea_ensemble_test\n \n ,\n \n emea_artemis_dev\n \n , etc.).\n \n \n \n \n Redirect urls\n \n \n \n https://api.{cluster_domain}/complete/ping_federate\n \n \n \n \n \n Additional fields\n \n \n Requires interactive electronic signatures using SSO: No\nAttributes: groups, openid, profile, email\n \n \n When the Iris request is fulfilled, you will receive an email with:\n \n \n \n Client ID (verify this is the one that was requested)\n \n \n Client Secret\n \n \n A list of OAuth endpoints\n \n \n \n Please share this information with the Datacoves team.\n \n \n \n \n Airflow\n \n \n \n \n EFS file system for airflow logs\n \n \n Follow the instructions to \"Create EFS in AWS Account\" from\n \n this confluence page\n \n . Don't follow the other sections of the page.\n \n \n As a name use datacoves-[cluster id]-[environment slug]-airflow-logs.\n \n \n It's important to attach the right the EKS security group so the EKS cluster has access to the EFS filesystem. 
You can find the security group id in the EKS cluster admin page, Networking tab, under\n \n Additional security groups\n \n .\n \n \n \n \n S3 bucket for Airflow dags\n \n \n Due to bitbucket scheduled downtimes we recommend using S3 as the DAGs store to mimimize disruptions.\n \n \n \n Create an S3 bucket per environment, i.e. datacoves-[cluster id]-[environment slug]-airflow-dags (datacoves-ensemble-pro001-airflow-dags)\n \n \n Create an IAM policy that grants read/write access to the new S3 bucket created, use the same name convention used for the S3 bucket.\n \n \n Follow\n \n this instructions\n \n to create an IAM Role, up to \"Create IAM Role For K8s Service Account\", attach the policy you created on step 2. Name the IAM role using the same convention you used for the S3 bucket\n \n \n Do not associate the IAM role to a K8s Service Account, that part is managed by Datacoves.\n \n \n Create a IAM user for jenkins to upload the dbt project and dags to S3. Use the same naming convention. Attach the same policy you created on step 2.\n \n \n \n \n \n Trusted policy example:\n \n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Principal\": {\n \"Federated\": \"arn:aws:iam::327112934799:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/125EA29C302DF7DBB900ED84AA85F0BB\"\n },\n \"Action\": \"sts:AssumeRoleWithWebIdentity\",\n \"Condition\": {\n \"StringLike\": {\n \"oidc.eks.us-east-1.amazonaws.com/id/125EA29C302DF7DBB900ED84AA85F0BB:sub\": \"system:serviceaccount:dcw-dev123:dev123-airflow-*\",\n \"oidc.eks.us-east-1.amazonaws.com/id/125EA29C302DF7DBB900ED84AA85F0BB:aud\": \"sts.amazonaws.com\"\n }\n }\n }\n ]\n}\n\n \n \n \n DBT API\n \n \n \n Create an S3 bucket.\n \n \n Choose a bucket name, we suggest using\n \n _dbt_api where\n \n could be\n \n ensemble\n \n ,\n \n ensembletest\n \n , etc.\n \n \n \n \n Create an IAM user with a policy to access the bucket, like the one below,\n replacing\n \n {your_bucket_name}\n \n with your bucket's name.\n \n \n Create an access key for the user. Share it with the Datacoves team.\n \n \n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:PutObject\",\n \"s3:GetObject\",\n \"s3:GetObjectVersion\",\n \"s3:DeleteObject\",\n \"s3:DeleteObjectVersion\"\n ],\n \"Resource\": \"arn:aws:s3:::{your_bucket_name}/*\"\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\",\n \"s3:GetBucketLocation\"\n ],\n \"Resource\": \"arn:aws:s3:::{your_bucket_name}\"\n }\n ]\n}\n\n \n \n \n Grafana\n \n \n Grafana requires an S3 bucket with lifecycle management enabled.\nFollow\n \n this guide\n \n to configure it accordingly.\n \n \n \n \n Airbyte\n \n \n \n S3 bucket for airbyte logs, an IAM user with a policy to access it, and an\n access key for the user.\n \n \n \n \n \n S3 bucket for airbyte logs\n \n \n \n Create an S3 bucket.\n \n \n Create an IAM user with a policy to access the bucket, like the one below,\n replacing\n \n {your_bucket_name}\n \n with your bucket's name.\n \n \n Create an access key for the user. 
Share it with the Datacoves team.\n \n \n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:PutObject\",\n \"s3:GetObject\",\n \"s3:GetObjectVersion\",\n \"s3:DeleteObject\",\n \"s3:DeleteObjectVersion\"\n ],\n \"Resource\": \"arn:aws:s3:::{your_bucket_name}/*\"\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\",\n \"s3:GetBucketLocation\"\n ],\n \"Resource\": \"arn:aws:s3:::{your_bucket_name}\"\n }\n ]\n}\n\n \n \n \n Data warehouse connection templates\n \n \n Please define how your data warehouse architecture will look and define the connection templates for both Analytics Engineers and Services, I.e. on a Snowflake database you’ll need to specify fields such as account, warehouse, database, role.\n \n \n \n \n Terraform\n \n \n Some work has been done (repo:\n \n itx-azt/iac\n \n ) to automate the creation of\nthese cluster requirements using terraform. However, because of authorization\nrestrictions imposed on terraform in jnj, it still requires manual\nintervention. At the moment it is probably faster overall to do everything\nmanually."},"3":{"url":"/docs_output/client-docs/jnj/2-configuration.html","snip":"Edit on github\n \n \n \n \n \n \n Configuring datacoves\n \n \n Configuration for each cluster is kept in a separate repository. They are\nmounted as git submodules under\n \n config/{cluster_domain}\n \n .\n \n \n You will need to create this git repository if there isn't one already for your\ncluster. Grant access to datacoves staff to this repo so we can initialize the\nconfiguration files and add the people that will do configuration or deployment\nto the git secret keyring.\n \n \n Clone this configuration to make changes to it. Alternatively, if you will run\nthe datacoves deployment from the same machine you can clone the datacoves_deployment\nrepository which has the configuration repos as\n \n git submodules\n \n .\n \n git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git\ncd datacoves_deployment\ngit submodule init\ngit submodule update config/$cluster_domain # Specify the path to the submodule to update.\ncd config/$cluster_domain # Config repo cloned as submodule in here.\n\n \n After the initial setup, the workflow to update configuration is as follows:\n \n # From within the cluster configuration repo.\n\n# 1. Fetch the latest configuration.\ngit checkout main\ngit pull\ngit secret reveal -f\n\n# 2. Make your changes (see below what's required).\n\n# 3. Commit and push your changes.\ngit secret hide\ngit diff # Review your changes, all sensitive data should be encrypted.\ngit add .\ngit commit -m 'Updated secrets/configuration.'\ngit push\n\n \n \n \n What values are required?\n \n \n Initially the configuration files will contain\n \n TODO\n \n comments to mark the\nplaces where values need to be filled in. Run\n \n grep -r . TODO\n \n to see what's\npending. Remove the\n \n TODO\n \n comments when you add a value. Most values are used\nto configure the external services that were created during\n \n cluster setup\n \n .\n \n \n The configuration variable names should give you an indication of what's needed.\nIf in doubt, ask.\n \n \n The requirements for each datacoves service follow. The list may be a useful\nguide but it could be out of date. 
Please rely on the\n \n TODO\n \n marks, not on the\nlist, as authoritative information.\n \n \n \n \n Datacoves core\n \n \n \n Datacoves api DB host (\n \n DB_HOST\n \n ) and password (\n \n DB_PASS\n \n ) in\n \n secrets/core-api.env\n \n \n \n PING_CLIENT_ID and PING_CLIENT_SECRET in\n \n secrets/core-api.env\n \n \n \n Ping group names in\n \n cluster-params.yaml\n \n , under\n \n project\n \n .\n \n \n Postgres DB Provisioner for services such as airbyte/airfow/superset in\n \n cluster-params.secret.yaml\n \n under\n \n postgres_db_provisioner\n \n .\n \n \n \n \n \n DBT Docs\n \n \n \n Deploy credentials in\n \n cluster-params.secret.yaml\n \n under\n \n deploy_credentials\n \n .\n \n \n \n \n \n Airbyte\n \n \n Not yet documented.\n \n \n \n \n Airflow\n \n \n The EFS CSI driver installed by cloudx is usually outdated (v1.0.0) so we need to opt out from the cloudx managed service.\n \n \n To do so, submit a PR to have Cloudx stop managing the currently installed driver here: https://sourcecode.jnj.com/projects/ITX-AED/repos/cloudx_container_pipelines_configs/browse/argocd/config.yaml#19\n \n \n \n Airflow EFS volume_handle (fs id) in:\n \n environments/dev123/airflow.secret.yaml\n \n \n \n \n \n \n Superset\n \n \n Not yet documented."},"4":{"url":"/docs_output/client-docs/jnj/3-configure-bastion-ec2-instance.html","snip":"Edit on github\n \n \n \n \n \n \n Configure Bastion EC2 instance\n \n \n \n \n JNJ\n \n \n Name: \nHost: AWSAZTIRLL000Q.jnj.com\n \n \n \n \n SSH to instance\n \n \n \n In your AWS workspace/Microsoft Remote Desktop (windows) open a terminal\n \n ssh 10.157.82.138 -m hmac-sha2-512\n \n or\n \n \n Create a shortcut to ssh pointing to\n \n C:\\Windows\\System32\\OpenSSH\\ssh.exe 10.157.82.138 -m hmac-sha2-512\n \n \n \n Click on the shortcut and type your password to access the instance\n \n \n \n \n \n CHAP\n \n \n Name: itx-wcr-EKS workstation\nHost: awswcrnval001n.kenvue.com\n \n \n \n \n Request role\n \n \n In your\n \n Remote Desktop\n \n go to\n \n IAM\n \n :\n \n \n \n Request / Star a new request\n \n \n Request the following roles:\n \n \n ITS-ITX-WCR-Datacove-Prd-K8sOperator\n \n \n ITS-ITX-WCR-Datacove-Prd-K8sMonitor\n \n \n ITS-ITX-WCR-Datacove-Prd-K8sAdmin\n \n \n ITS-EP-AWSWCRNVAL001N-LINUX-NA-UNIXSEAdmins\n \n \n \n \n Details:\n \n \n Job role: Datacoves Support\n \n \n Application ID: APP000300001207\n \n \n Application Name: DATACOVES-ANALYTICS PRODUCTION WORKBENCH FOR ELT & ORCHESTRATION\n \n \n Describe, in detail, the job functions you perform that REQUIRE this level of privilege: We maintain and support the Datacoves application which runs on Kubernetes.\n \n \n Is the Application Software (includes Web Components, Vendor Application), installed on the Server on which you are requesting Admin Access? 
No / Yes: No\n \n \n Frequency of Need: Weekly\n \n \n \n \n Submit\n \n \n \n \n \n SSH to instance\n \n \n \n On the terminal run command\n \n ssh 10.79.29.123\n \n \n \n Your user should be added to the following groups in\n \n /etc/groups\n \n \n \n \n \n \n Create your working directory\n \n \n Create your working directory under\n \n /app/users\n \n , e.g.\n \n /app/users/ssassi\n \n .\n \n \n \n \n Grant yourself access to docker\n \n sudo su -\nvi /etc/group\n\n \n Example:\n \n datacoves:x:8653:amorer01,<my-user> # To chap\ndocker:x:187:amorer01,<my-user>\n\n \n \n \n Configure your home folder (~)\n \n \n \n Copy the contents of\n \n /app/users/datacoves-home-template\n \n to your home folder:\n \n \n cp -R /app/users/datacoves-home-template/. ~/\n\n \n \n Exit and reconnect to the instance to ensure that the\n \n .bashrc\n \n script was run accordingly\n \n \n Fix kubelogin permissions\n \n \n asdf uninstall kubelogin\nasdf install kubelogin\n\n \n \n Configure your credentials to the clusters\n \n \n kc config get-contexts\nkc config use-context <choose one>\nkc get ns\n\n \n Note: you'll need to change your ~/.kube/config permissions:\n \n chmod 600 ~/.kube/config\n\n \n \n \n Clone datacoves deployment repo\n \n /app/users/<your username>\ngit clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git\n\n \n After cloning, follow the instructions to reveal secrets and install requirements."},"5":{"url":"/docs_output/client-docs/jnj/4-configure-bitbucket-and-jenkins.html","snip":"Edit on github\n \n \n \n \n \n \n How to configure Bitbucket project and connect it with Jenkins project\n \n \n \n \n Bitbucket\n \n \n \n \n Ensure you enable the following hooks in your Bitbucket project\n \n \n \n JnJ VPCx - Post Receive Repository Hook for SCM\n \n \n Webhook to Jenkins for Bitbucket Server\n \n \n \n \n \n \n \n \n JnJ VPCx - Post Receive Repository Hook for SCM\n \n \n \n \n \n \n \n Webhook to Jenkins for Bitbucket Server\n \n \n \n \n Tab 1\n \n \n \n \n \n \n \n Tab 2\n \n \n \n \n \n \n \n Tab 3\n \n \n \n \n \n \n \n Enable the following Merge Checks\n \n \n \n \n \n \n \n Request access to taqy-docker for the project service account\n \n \n Typically the service account created automatically is\n \n sa-itsus-<PROJECT CODE>-devusr\n \n .\n \n \n Go to App Dev Tools and request access for that user, like so:\n \n \n \n \n \n \n \n Jenkins\n \n \n \n \n Ensure Bitbucket plugins were correctly configured\n \n \n Navigate to Manage Jenkins -> Configure System and modify the following plugins:\n \n \n \n \n \n \n \n \n \n \n Create Multibranch pipeline project\n \n \n At Home page -> \"+ New Item\":\n \n \n \n \n \n \n \n Configure branch sources\n \n \n \n \n \n \n \n Configure repo behaviors\n \n \n \n \n \n \n \n Set up build configuration and other items\n \n \n \n \n \n \n \n \n \n \n Jenkinsfile dependencies\n \n \n You'll need a credential that stores the secrets used to connect to your Data Warehouse.\n \n \n Create a new credential in the Jenkins Admin area. As of Aug.
'23 those can be found in:\n \n \n \n Dashboard -> Credentials -> System -> Global Credentials (unrestricted)\n \n \n \n \n \n \n \n \n \n \n \n Known issues\n \n \n \n When \"pre hook declined\" it could be due to JIRA issues configuration: from settings ->\n \n Jira Issues\n \n select \"Use custom settings\" and be sure \"Don't need a Jira issue key\" is selected"},"6":{"url":"/docs_output/client-docs/jnj/5-deployment.html","snip":"Edit on github\n \n \n \n \n \n \n How to deploy (or update) datacoves to a kubernetes cluster\n \n \n Prerequisites:\n \n cluster and external resources setup\n \n .\n \n \n SSH into a machine with kubectl access to the cluster from where you will run\nthe installation scripts. Then:\n \n # Set these as needed for your cluster.\ncluster_domain=FILL_IN # e.g. ensembletest.apps.jnj.com\nkubectl_context=FILL_IN # e.g. itx-ank-ensemble-test\n\n# Clone the repository into the installation workstation (required once).\ngit clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git\ncd datacoves_deployment\n\n# Install python dependencies for the installation scripts (required once).\npip3 install --user -r requirements.txt\n\n# Fetch the latest changes and reveal the secrets in the config submodule directory.\ngit pull\ngit submodule update --init\n(cd config/$cluster_domain; git secret reveal -f)\n\n# Install datacoves base dependencies into the cluster (ingress-nginx, etc.)\n# Usually not required after the first time datacoves is released to a cluster.\n./cli.py setup_base $kubectl_context $cluster_domain\n\n# Deploying ingress-nginx will create an ELB. Use the following command to retrieve it's URL.\nkubectl --context $kubectl_context get -A svc | grep LoadBalancer\n\n# Update cluster-params.yaml setting external_dns_url to that URL.\n$EDITOR config/$cluster_domain/cluster-params.yaml\n# Commit the change.\n\n# Install/update datacoves.\n./cli.py install"},"7":{"url":"/docs_output/client-docs/jnj/6-access.html","snip":"Edit on github\n \n \n \n \n \n \n Access Datacoves on JNJ clusters\n \n \n \n \n Requesting Roles\n \n \n \n \n NOTE:\n \n Please inform Martin Ryan before requesting appdevtools roles\n \n \n \n In order to have access to all third-party tools (Bitbucket, Jenkins, Artifactory, etc.) you must request specific roles.\n \n \n To do so, you must go to https://appdevtools.jnj.com and request the\n \n Datacoves Support\n \n Model User template.\n \n \n Make sure to write\n \n Needed for Datacoves platform support\n \n as requesting reason.\n \n \n \n \n \n \n \n Datacoves Access\n \n \n In order to get access as an admin and developer on the different JNJ clusters you need to request the following AD groups:\n \n \n \n \n Medical Devices\n \n \n \n JNJ-APP-HMD-DATACOVES-ADMIN\n \n \n JNJ-APP-HMD-DATACOVES-DEVELOPER\n \n \n \n \n \n Consumer Health / Kenvue\n \n \n Go to iam.kenvue.com, search for\n \n Datacoves\n \n . There's one role for Admin and one per Environment. You need to request\n \n ADMIN\n \n ,\n \n *-DEVELOPER\n \n and\n \n *-SYSADMIN\n \n roles. 
For example:\n \n \n \n ITS_APP_DATACOVES_ADMIN\n \n \n ITS_APP_DATACOVES_DEV_CBI_VMA_DEVELOPER\n \n \n ITS_APP_DATACOVES_DEV_CBI_VMA_SYSADMIN\n \n \n ITS_APP_DATACOVES_DEV_COMX_CUST_DEVELOPER\n \n \n ITS_APP_DATACOVES_DEV_COMX_CUST_SYSADMIN\n \n \n ITS_APP_DATACOVES_DEV_COMX_GLOBAL_DEVELOPER\n \n \n \n \n \n Ensemble\n \n \n \n JNJ-APP-PCE-DATACOVES-ADMIN\n \n \n JNJ-APP-PCE-DATACOVES-DEVELOPER\n \n \n \n \n \n R&D\n \n \n \n ITS-APP-DEV-JRDDATACOVES-ADMIN\n \n \n ITS-APP-DEV-JRDDATACOVES-DEVELOPER\n \n \n \n \n \n taqy Access\n \n \n taqy is the docker repository used by all J&J instances. Access to it is necessary in order to manage images on it.\n \n \n To request access, use https://appdevtools.jnj.com\n \n \n \n Request Access, By User, Other\n \n \n Enter your username\n \n \n Tool: EAT Jenkins and Artifactory\n \n \n Team: taqy\n \n \n Reason for request: Access to CI images for DataCoves\n \n \n Grant these roles: ITS-ASx-TAQY-DEV-Executors, ITS-ASx-TAQY-DEV-Viewers\n \n \n \n For reference, the main Ensemble Jenkins user is sa-itsus-jbfl-devusr\n \n \n \n \n Snowflake Access\n \n \n As done with the groups above, you must also request\n \n JNJ-APP-PCE-SNOWFLAKE-EMEA-DEVELOPER\n \n \n \n \n \n How to request the groups?\n \n \n Using the AWS workspace:\n \n \n \n Navigate to https://iam.jnj.com\n \n \n Click on\n \n Start new request\n \n \n \n Type the group name on the\n \n Find a service item\n \n search box.\n \n \n Click on\n \n Request\n \n button\n \n \n In the popup, leave\n \n Valid from\n \n and\n \n Valid until\n \n empty, in the\n \n reason\n \n field type \"Datacoves support team\"."},"8":{"url":"/docs_output/client-docs/jnj/7-configure-sa-docker-in-kubernates.html","snip":"Edit on github\n \n \n \n \n \n \n How to configure Service Account Docker in Kubernetes to pull images.\n \n \n \n JnJ\n \n and\n \n Kenvue\n \n are using their own private Docker artifact repositories. In order to download images from those repositories in Kubernetes we need to create secrets with valid credentials in each Kubernetes cluster.\n \n \n This process is documented by JnJ at\n \n confluence\n \n .\n \n \n \n \n Select Kubernetes context\n \n kubectl config get-contexts\nkubectl config use-context <context>\n\n \n \n \n Delete old service account (If it already exists)\n \n kubectl get secrets -n default\nkubectl delete secret taqy-docker -n default\n\n \n \n \n Create new service account\n \n # Create secret in default namespace - Recommended to use the EAT service account username and password for credentials\nkubectl create secret docker-registry taqy-docker --docker-server=jnj.artifactrepo.jnj.com --docker-username=<service-account-username> --docker-password=<service-account-password> -n default\n\n# Annotate secret to sync across all namespaces\nkubectl annotate secret taqy-docker cluster.managed.secret=\"true\" -n default\n\n \n \n \n Inspect the new secret\n \n kubectl -n default get secret taqy-docker -o yaml\n\n \n Copy the value from\n \n data.dockerconfigjson\n \n \n echo <value> | base64 -d\n\n \n Note: Check that the secrets have been replicated to all namespaces.
(Can check one or two)\n \n kubectl -n <namespace> get secret taqy-docker -o yaml\necho <value> | base64 -d\n\n \n If the secret was not replicated, check the pod's logs:\n \n kubectl -n kube-system get pods\nkubectl -n kube-system logs namespace-secrets-sync-<hash> --tail 100"},"9":{"url":"/docs_output/client-docs/jnj/8-summary-requirements-new-cluster.html","snip":"Edit on github\n \n \n \n \n \n \n Summary for the requirements of a new Cluster.\n \n \n For more details check\n \n Cluster requirements\n \n \n \n \n \n Database (RDS)\n \n \n \n \n Minimum requirements\n \n \n \n Engine: Postgres\n \n \n Version: 14.9\n \n \n Multi-AZ DB Cluster.\n \n \n Master user: postgres\n \n \n Master password:\n \n \n \n \n Instance class: db.r5.large\n \n \n Storage type: Aurora Standard or gp2\n \n \n Allocated_storage: 100GB\n \n \n Enable storage autoscaling\n \n \n Maximum storage threshold: 1TB\n \n \n Authentication: password\n \n \n \n \n \n EKS\n \n \n \n \n Configuration\n \n \n \n External DNS.\n \n \n \n m5.xlarge\n \n instances.\n \n \n \n \n \n Worker groups\n \n \n \n General\n \n \n Volumed\n \n \n Workers\n \n \n \n \n \n General\n \n \n \n min_nodes: 1\n \n \n max_nodes: 30\n \n \n root_volume_size: 200\n \n \n labels:\n \n \n labels:\n ...\n - key: k8s.datacoves.com/nodegroup-kind\n value: general\n\n \n \n \n Volumed\n \n \n \n min_nodes: 1\n \n \n max_nodes: 30\n \n \n root_volume_size: 200\n \n \n labels:\n \n \n labels:\n ...\n - key: k8s.datacoves.com/nodegroup-kind\n value: volumed\n\n \n \n \n Workers\n \n \n \n min_nodes: 1\n \n \n max_nodes: 30\n \n \n root_volume_size: 200\n \n \n labels:\n \n \n labels:\n ...\n - key: k8s.datacoves.com/workers\n value: enabled\n\n \n \n \n Other configuration.\n \n \n \n EFS for each environment for\n \n Airflow Logs\n \n .\n \n \n S3 buckets for each environment for\n \n Dags sync\n \n , with read-only permissions. (Optional. Can be git-sync).\n \n \n One S3 bucket for\n \n Observavility stack\n \n . Example\n \n ensemble-prd-observability-grafana-loki\n \n . (Full permissions)\n \n \n One S3 bucket for\n \n dbt-api\n \n . Example\n \n ensemble-prd-dbt-api\n \n . (Full permissions)\n \n \n \n \n \n Example for full S3 bucket permission\n \n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:PutObject\",\n \"s3:GetObject\",\n \"s3:GetObjectVersion\",\n \"s3:DeleteObject\",\n \"s3:DeleteObjectVersion\"\n ],\n \"Resource\": \"arn:aws:s3:::{your_bucket_name}/*\"\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\",\n \"s3:GetBucketLocation\"\n ],\n \"Resource\": \"arn:aws:s3:::{your_bucket_name}\"\n }\n ]\n}"},"10":{"url":"/docs_output/client-docs/jnj/","snip":"Edit on github\n \n \n \n \n \n \n Datacoves deployment\n \n \n This repository contains the datacoves installation scripts. They install\ndatacoves to an existing EKS cluster, based on the configuration files in the\n \n config\n \n directory. Configuration for each cluster is kept in a separate\nrepository. They are mounted as git submodules under\n \n config/{cluster_domain}\n \n .\n \n \n Before running the installation scripts the EKS cluster and other required AWS\nresources must be created. See\n \n cluster requirements\n \n .\n \n \n Then a repository to use as the cluster configuration submodule must be created.\nSee\n \n configuration\n \n .\n \n \n After that, deployment can begin. 
See\n \n deployment\n \n ."},"11":{"url":"/docs_output/client-docs/kenvue/how-to-setup-helm-chart.html","snip":"Edit on github\n \n \n \n \n \n \n How to set up Helm Chart on kenvue\n \n \n Artifacory: https://kenvue.jfrog.io\nRepository: dco-helm\nCredentials: See 1password\nProtocol: OCI\n \n \n Steps:\n \n \n \n Artifactory login.\n \n \n Download or build the helm chart.\n \n \n Upload the new helm chart.\n \n \n Check the new helm chart.\n \n \n Install the helm chart.\n \n \n \n \n \n 1. Artifactory login\n \n helm registry login https://kenvue.jfrog.io/dco-helm\n\n \n \n \n 2. Build or download the helm chart.\n \n \n In this case as an example we are going to download a helm chart from the JnJ artifactory\n \n wget --user <my-user> --password <my-password> https://artifactrepo.jnj.com:443/artifactory/jnj-helm-charts/metrics-server-3.12.2.tgz\n\n \n \n \n 3. Upload the new helm chart.\n \n helm push metrics-server-3.12.2.tgz oci://kenvue.jfrog.io/dco-helm/metrics-server\n\n \n \n \n 4. Check the new helm chart.\n \n helm show all oci://kenvue.jfrog.io/dco-helm/metrics-server\n\n \n \n \n 5. Install the helm chart.\n \n helm install my-release oci://kenvue.jfrog.io/dco-helm/metrics-server --version 3.12.2"},"12":{"url":"/docs_output/client-docs/orrum/","snip":"Edit on github\n \n \n \n \n \n \n Datacoves deployment\n \n \n This section contains instructions on how to connect to Orrum infra via azure to build, maintain, and monitor datacoves deployments.\n \n \n \n \n VPN Connection\n \n \n kubectl requires connection to Orrum VPN. Download\n \n Azure VPN Client\n \n .\n \n \n The profile can be downloaded from Azure; login with Support_Datacoves@orrumcorp.onmicrosoft.com with the credentials from 1Password.\n \n \n https://portal.azure.com/#@orrum.com/resource/subscriptions/0f8e4c48-c319-4ed9-af14-ef50501e3a41/resourceGroups/DataCoves/providers/Microsoft.Network/virtualNetworkGateways/DataCovesGateway/pointtositeconfiguration\n \n \n Click \"Download VPN client\" in the header, and you will get a zip file with the profile files; you will want the Azure client profiles, and you can use the Import button in the Azure client to import it.\n \n \n To connect to the vpn, use Support_Datacoves@orrumcorp.onmicrosoft.com, credentials on 1Password.\n \n \n \n \n kubectl setup\n \n # Ensure Python is Installed\npipx install az-cli --include-deps\n\n# Get login password from 1pswd\naz login -u Support_Datacoves@orrumcorp.onmicrosoft.com\n\n# Install kubectl + kubelogin\naz aks install-cli\n\n# Set subscription\naz account set --subscription 0f8e4c48-c319-4ed9-af14-ef50501e3a41\n\n# Get credentials for new cluster\naz aks get-credentials --resource-group DataCoves --name Datacoves_kube\n\n# List contexts\nkubectl config use-context Datacoves_kube\n\n \n \n \n Rename Context\n \n \n It is very important that the context be named orrum-new as things such as updating the cluster will have scripts that depend on the context name.\n \n kubectl config rename-context Datacoves_kube orrum-new\nkubectl config use-context orrum-new\n\n \n Now verify connectivity with\n \n kubectl get ns\n \n \n \n \n \n Config DNS on\n \n /etc/hosts\n \n (Optional)\n \n \n Note: This is probably not necessary anymore.\n \n \n You can force the domain and subdomains DNS if it's not configured.\n \n 10.10.0.36 datacoves.orrum.com\n10.10.0.36 api.datacoves.orrum.com\n10.10.0.36 authenticate-dev123.datacoves.orrum.com\n10.10.0.36 dev123.datacoves.orrum.com\n10.10.0.36 airbyte-dev123.datacoves.orrum.com\n10.10.0.36 
dbt-docs-dev123.datacoves.orrum.com\n10.10.0.36 airflow-dev123.datacoves.orrum.com\n10.10.0.36 superset-dev123.datacoves.orrum.com\n10.10.0.36 grafana.datacoves.orrum.com\n\n# <user>\n10.10.0.36 <user>-1-transform-dev123.datacoves.orrum.com\n10.10.0.36 <user>-1-dbt-docs-dev123.datacoves.orrum.com\n10.10.0.36 <user>-transform-dev123.datacoves.orrum.com\n\n \n \n Note: Check the cluster's Public IP\n \n 10.10.0.36"},"13":{"url":"/docs_output/dev-logs/2021-06-create-er-diagram.html","snip":"Edit on github\n \n \n \n \n \n \n How to create an entity relationship diagram\n \n ./cli.py pod-sh\napt install graphviz-dev\npip3 install pygraphviz\n./manage.py graph_models -a -X *Mixin,Abstract*,ContentType,Session,Nonce,Partial,TokenProxy -g -o core-erd.png\n\n \n Learn more at https://django-extensions.readthedocs.io/en/latest/graph_models.html"},"14":{"url":"/docs_output/dev-logs/2021-09-eks-setup.html","snip":"Edit on github\n \n \n \n \n \n \n Installation\n \n \n \n \n Set up IAM user\n \n \n IAM user needs the following privileges to create the cluster:\n \n \n https://eksctl.io/usage/minimum-iam-policies/\n \n \n \n \n AWS CLI\n \n \n Install AWS CLI in your local environment\n \n \n https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html\n \n \n \n \n Configure credentials\n \n \n \n Generate access key\n \n \n Configure your credentials\n \n \n \n \n \n Install eksctl\n \n \n Install eksctl\n \n \n https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html\n \n \n \n \n On Mac\n \n brew tap weaveworks/tap\nbrew install weaveworks/tap/eksctl\n\n \n \n \n Create cluster\n \n \n https://eksctl.io/usage/creating-and-managing-clusters/\n \n eksctl create cluster -f cluster.yaml --tags service=datacoves\n\n \n \n \n Install metrics server\n \n \n https://docs.aws.amazon.com/eks/latest/userguide/metrics-server.html\n \n kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml\n\n \n \n \n Kubernetes dashboard\n \n \n https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html\n \n kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.5/aio/deploy/recommended.yaml\nkubectl apply -f eks-admin-service-account.yaml\n\n \n \n \n Open dashboard\n \n kubectl proxy\n\n \n http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#!/login\n \n \n Get a login token with:\n \n kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}')\n\n \n \n \n Configure Docker hub\n \n kubectl create ns alpha2\nkubectl create secret docker-registry docker-secret \\\n--docker-server=\"https://index.docker.io/v1/\" \\\n--docker-username=\"<USER_NAME>\" \\\n--docker-password=\"<PASSWORD>\" \\\n--docker-email=\"<EMAIL>\" \\\n--namespace=\"alpha2\"\n\n \n \n \n EKS (k8s on AWS)\n \n # Create the cluster https://eksctl.io/usage/creating-and-managing-clusters/\neksctl create cluster -f eks/eks-cluster.yaml\n\n# (Optional) Inspect the config that kustomize generates\nkubectl kustomize eks\n\n# Apply the kustomization directory to the cluster\nkubectl apply -k eks\n\n \n \n \n Kubernetes dashboard\n \n \n To open the dashboard run\n \n kubectl proxy\n \n and navigate to:\n \n \n http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#!/login\n \n # Get a login token with\nkubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print 
$1}')"},"15":{"url":"/docs_output/dev-logs/2021-12-jnj-ensembledev-deployment.html","snip":"Edit on github\n \n \n \n \n \n \n Datacoves deployment\n \n \n This document describes the deployment of the components of a datacoves system\nto a JnJ EKS kubernetes cluster.\n \n \n \n \n Prerequisites\n \n \n \n This confluence page\n \n should be followed prior to the steps outlined here to deploy datacoves. It\nshould document how to setup an EKS cluster with the necessary prerequisites,\nand how to create and configure the required AWS services used.\n \n \n We assume here that there is a EKS cluster running with certain services already\ndeployed on it. The cluster is setup through CI from the git repo at\nhttps://sourcecode.jnj.com/projects/ITX-AZT/repos/ensemble.\nWe require the following systems running in the cluster:\n \n \n \n ingress-nginx as an ingress controller.\n \n \n cert-manager to issue SSL certificates.\n \n \n external-dns to create DNS rules from annotations.\n \n \n A system that creates a new kubernetes secret with a known name with\n credentials to pull docker images in every namespace of the cluster.\n \n \n \n The machine from where the deployments scripts will be run must have python3 and\ngit installed, as well as kubectl (client) version 1.21 or higher, configured\nto access the cluster with broad permissions.\n \n \n We also assume the docker registry / repository that you configure to pull\nimages has all the docker images required. Datacoves will build and push this\nimages. The list of images used by a cluster, computed from this repo's\nconfiguration, can be displayed with\n \n ./cli.py images ensembledev.apps.jnj.com\n \n ,\nor in general\n \n ./cli.py images CLUSTER_DOMAIN\n \n .\n \n \n \n \n Initial setup and configuration\n \n \n Clone the datacoves_deployment git repository and change directory to it.\n \n git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git\ncd datacoves_deployment\n\n \n Configuration is stored in the repo, encrypted using git-secret. You will need\nto be in the repo's git secret keyring to decrypt them. Ask someone already in\nthe keyring for access (e.g. spelufo@its.jnj.com).\n \n \n Decrypt the configuration secrets. The\n \n -f\n \n flag will overwrite existing files.\n \n git secret reveal -f\n\n \n The\n \n config\n \n directory holds configuration files. Each subdirectory holds\nconfiguration for a kubernetes cluster and must be named after the cluster\ndomain name. For example, the configuration for the current (2021) version of\ndatacoves is in\n \n config/ensembledev.apps.jnj.com\n \n .\n \n \n If deploying to a new cluster, create a new directory under config based on\n \n config/ensembledev.apps.jnj.com\n \n . 
You will need to use\n \n git secret add\n \n and\n \n git secret hide\n \n to add your new secrets to the repo and encrypt them before\ncommitting them.\n \n \n \n \n Deploying datacoves core web application\n \n \n First, make sure your kubectl context is appropriate for the cluster.\n \n CLUSTER_DOMAIN=ensembledev.apps.jnj.com\nKCTX=$(kubectl config current-context)\n\n# Deploy the datacoves core api server to the core namespace.\n./cli.py setup_core \"$KCTX\" \"$CLUSTER_DOMAIN\"\n\n \n Enter an api server pod and run database migrations:\n \n kubectl -n core exec -it $(kubectl -n core get pods -l app=core-api -o name) -- bash\n\n# From inside the pod:\n./manage.py migrate\n./manage.py loaddata */fixtures/*\n\n \n Check the server is running:\n \n $ kubectl -n core get pods\nNAME READY STATUS RESTARTS AGE\ncore-api-deployment-5f8f64cf69-6rvhd 1/1 Running 0 3d19h\n\n \n \n \n Deploying datacoves project operator\n \n \n The datacoves project operator manages two\n \n CRDs\n \n :\ndatacoves.com/Project and datacoves.com/User. To deploy the operator, run:\n \n ./cli.py setup_operator \"$KCTX\" \"$CLUSTER_DOMAIN\"\n\n \n To check the operator is running, and/or see its logs:\n \n $ kubectl -n operator-system get pods\nNAME READY STATUS RESTARTS AGE\noperator-controller-manager-78cc7cfb6-9ddkw 2/2 Running 0 47h\n\n$ kubectl -n operator-system logs -l control-plane=controller-manager -c manager -f\n\n \n \n \n Deploying a datacoves project namespace\n \n \n Every project is deployed to a namespace named\n \n dcp-{project_name}\n \n . The\nsetup_project script creates a new namespace and project kubernetes object from\nthe configuration file in\n \n config/{cluster_domain}/projects/{project_name}.yaml\n \n .\nThe operator will detect changes to this object and create deployments and other\nresources for the project.\n \n PROJECT_NAME=emeadev\n./cli.py setup_project \"$KCTX\" \"$CLUSTER_DOMAIN\" \"$PROJECT_NAME\"\n\n \n To watch for pod status changes as the operator creates the project resources:\n \n kubectl -n \"dcp-$PROJECT_NAME\" get pods --watch"},"16":{"url":"/docs_output/dev-logs/2022-04-jnj-artemisdev-configuration.html","snip":"Edit on github\n \n \n \n \n \n \n Configuring datacoves\n \n \n Requirements: Access to a datacoves configuration git repo and being in its git secret keyring.\n \n \n First pull the latest changes and reveal the git secrets.\n \n git checkout main\ngit pull\ngit secret reveal -f\n\n \n I've marked with\n \n TODO\n \n the values that need to be filled in:\n \n \n \n Airflow DB connection in:\n \n environments/dev123/airflow.secret.yaml\n \n \n \n Airflow EFS volume_handle (fs id) in:\n \n environments/dev123/airflow.secret.yaml\n \n \n \n Datacoves api DB host (\n \n DB_HOST\n \n ) and password (\n \n DB_PASS\n \n ) in\n \n secrets/core-api.env\n \n \n \n PING_CLIENT_ID and PING_CLIENT_SECRET in\n \n secrets/core-api.env\n \n \n \n \n After editing those files to add the required values, commit the changes with:\n \n git secret hide\ngit diff # Review your changes, all sensitive data should be encrypted.\ngit add .\ngit commit -m 'Updated secrets.'\ngit push"},"17":{"url":"/docs_output/dev-logs/2022-04-jnj-ensembletest-deployment.html","snip":"Edit on github\n \n \n \n \n \n \n Datacoves deployment\n \n \n This repository contains the datacoves installation scripts. They install datacoves to an existing EKS cluster, based on the configuration files in the\n \n config\n \n directory. Configuration for each cluster is kept in a separate repository.
They are mounted as git submodules under\n \n config/{cluster_domain}\n \n .\n \n \n Prior to this, the EKS cluster and other required AWS resources must be created. The clusters are created through CloudX pipelines, from\n \n cluster.yaml\n \n files in other repositories like\n \n itx-ank/ensemble\n \n . Additional AWS resources are created using terraform from the\n \n iac\n \n repository.\n \n \n Once these prerequisites are done, and the configuration repository for the cluster has been updated accordingly, the installation is as follows.\n \n # Set these as needed for your cluster.\ncluster_domain=ensembletest.apps.jnj.com\nkubectl_context=itx-ank-ensemble-test\n\n# Clone this repository into the installation workstation.\ngit clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git\ncd datacoves_deployment\ngit submodule update --init\n\n# Reveal the secrets in the config submodule directory.\n(cd config/$cluster_domain; git secret reveal -f)\n\n# Install python dependencies for the installation scripts.\npip3 install --user -r requirements.txt\n\n# Install datacoves base dependencies into the cluster (ingress-nginx, etc.)\n./cli.py setup_base $kubectl_context $cluster_domain\n\n# Install datacoves.\n./cli.py install $kubectl_context $cluster_domain"},"18":{"url":"/docs_output/dev-logs/2022-05-setup-aks-postgres-flexible-server.html","snip":"Edit on github\n \n \n \n \n \n \n Set up postgres flexible server on Azure\n \n \n \n Find it\n \n here\n \n \n \n Connect to it using this command:\n \n \n psql -h datacoves-east-us.postgres.database.azure.com -U dcmaster -d postgres\n\n \n \n Create the\n \n datacoves\n \n user that will be used by Django:\n \n \n CREATE USER datacoves password '<PASSWORD>';\nALTER USER datacoves CREATEDB CREATEROLE;\nGRANT datacoves TO dcmaster;\nCREATE DATABASE datacoves OWNER datacoves;\nGRANT CONNECT ON DATABASE datacoves TO datacoves;\n\n \n \n Dump data from internal Database\n \n \n pg_dump -U postgres -h postgres-svc -d datacoves -Fc > dump.sql\n\n \n \n Restore data on new Azure DB\n \n \n pg_restore -U datacoves -h datacoves-east-us.postgres.database.azure.com -d datacoves --no-owner --role=datacoves dump.sql\n\n \n \n Repeate steps 4 and 5 with the rest of the services that need to be migrated\n \n \n \n Keep in mind that database objects owner could be changed, reassign the owner to the corresponding service account, i.e.:\n \n REASSIGN OWNED BY datacoves TO dev123_airbyte;\n\n \n If migrating\n \n temporal\n \n and\n \n temporal_visibility\n \n databases, you also need to update the database name on\n \n schema_versions\n \n .\n \n \n \n \n Set\n \n airbyte_db_external: true\n \n ,\n \n airflow_db_external: true\n \n and\n \n superset_db_external: true\n \n accordingly\n \n \n \n \n Configure\n \n postgres_db_provisioner\n \n using the master user connection/credentials"},"19":{"url":"/docs_output/dev-logs/","snip":"Edit on github"},"20":{"url":"/docs_output/how-tos/administrate-east-us-a-aks-cluster.html","snip":"Edit on github\n \n \n \n \n \n \n Administrate east-us-a AKS cluster\n \n \n \n \n Permissions\n \n \n \n Ask an administrator to create you a datacoves (microsoft) user. 
https://admin.microsoft.com.\n \n \n Ask an administrator to add you to the\n \n DevOps\n \n \n group\n \n .\n \n \n \n \n \n Configure kubectl\n \n \n \n Download Azure CLI\n \n .\n \n \n Login to your account:\n \n az login\n\n \n Then, run the following commands:\n \n az account set --subscription 91bd2205-0d74-42c9-86ad-41cca1b4822b\naz aks get-credentials --resource-group datacoves --name east-us-a\n\n \n This will add a new context to\n \n kubectl\n \n , so you can now run:\n \n kubectl get pods -A\n\n \n \n \n Manage nodepools\n \n \n \n \n List nodepools\n \n \n List nodepools in the\n \n datacoves\n \n resource group,\n \n east-us-a\n \n cluster:\n \n az aks nodepool list --cluster-name east-us-a --resource-group datacoves\n\n \n \n \n Add workers nodepool\n \n az aks nodepool add --cluster-name east-us-a --resource-group datacoves --name workerslarge --mode User --enable-cluster-autoscaler --min-count 1 --max-count 10 --node-vm-size Standard_D4s_v3 --labels k8s.datacoves.com/workers=enabled\n\n \n \n \n Modify existing nodepool to add new labels\n \n \n Let's add a new label\n \n k8s.datacoves.com/workers=enabled\n \n to an existing nodepool which already has the label\n \n k8s.datacoves.com/nodegroup-kind=general\n \n . Old a new labels need to be specified.\n \n az aks nodepool update --cluster-name east-us-a --resource-group datacoves --name generallarge --labels {k8s.datacoves.com/workers=enabled,k8s.datacoves.com/nodegroup-kind=general}"},"21":{"url":"/docs_output/how-tos/airflow-configuration.html","snip":"Edit on github\n \n \n \n \n \n \n Environment variables override\n \n \n Airflow has a feature that lets you override system's defaults on a per-task basis (see https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/executor/kubernetes.html#pod-override).\n \n \n \n Example \"Log level override\"\n \n :\n \n \"pod_override\": k8s.V1Pod(\n spec=k8s.V1PodSpec(\n containers=[\n k8s.V1Container(\n name=\"base\",\n image=f\"{IMAGE_REPO}:{IMAGE_TAG}\",\n env=[\n k8s.V1EnvVar(\n name=\"AIRFLOW__LOGGING__LOGGING_LEVEL\",\n value=\"DEBUG\"\n )\n ]\n )\n ]\n )\n),"},"22":{"url":"/docs_output/how-tos/billing-system.html","snip":"Edit on github\n \n \n \n \n \n \n Datacoves Billing System\n \n \n This document provides comprehensive information on Datacoves’ billing integration with Stripe.\n \n \n \n \n Introduction\n \n \n Datacoves integrates with Stripe to manage billing by listening to Stripe events and adjusting Datacoves data accordingly. The system also modifies subscriptions when changes occur in services, users, or tally marks.\n \n \n The connection between Datacoves and Stripe begins when a user creates a subscription through the Setup Wizard, or when a Datacoves Admin sets up a subscription directly in the Stripe UI.\n \n \n \n Note\n \n : Free trial accounts\n \n are not connected\n \n to Stripe.\n \n \n \n \n Account Setup Wizard\n \n \n \n \n \n \n \n Customer Types\n \n \n For billing, we distinguish between three customer types:\n \n \n \n Free trial customers\n \n \n Credit card customers\n \n \n Check / bank transfer customers\n \n \n \n \n \n Free trial customers\n \n \n These type of customers are not connected to stripe while they're on trial. During the trial period, Stripe does not have information about these accounts.\n \n \n Free trial customers will see a button on the header inviting them to finalize the trial and create a subscription. 
Upon subscribing, they transition to\n \n credit card customers\n \n .\n \n \n \n \n Credit card customers\n \n \n The credit card customer workflow is completely managed by Datacoves:\n \n \n \n \n Customer selects\n \n Growth Plan\n \n and after clicking on\n \n Next\n \n Datacoves creates the stripe customer, sets the\n \n customer_id\n \n and redirects them to the Stripe billing page where the stripe billing process begins.\n \n \n \n \n Once the customer enters their credit card and completes the Stripe billing process, Datacoves receives a notification and sets the\n \n subscription\n \n payload on the brand new account.\n \n \n \n \n From this point, any updates to services/users/tally marks in the Datacoves account are automatically reflected in Stripe, adjusting invoices accordingly.\n \n \n \n \n \n \n Check / bank customers\n \n \n For customers preferring bank transfers or checks, setup is managed manually through the Stripe UI.\n \n \n \n \n Customer Setup\n \n \n \n Customer creates a Trial account as described earlier.\n \n \n A Stripe Admin\n \n manually creates the customer\n \n using the Stripe UI. In order to follow the same convention used by Datacoves, please use account's\n \n slug\n \n as the stripe customer name, and account's\n \n owner email\n \n as the stripe customer email. Add an address to calculate taxes automatically.\n \n \n Once you have a customer id on stripe, modify the Datacoves account on the admin panel and set it on the\n \n Customer Id\n \n field.\n \n \n Modify the Datacoves account once more and set the right\n \n plan\n \n and\n \n variant\n \n . We typically use the\n \n growth\n \n plan for these accounts; the\n \n variant\n \n will be determined by Sales depending on the pricing negotiated.\n \n \n \n \n \n Subscription Setup\n \n \n The Stripe Admin now\n \n creates a subscription on Stripe\n \n for the recently created customer. Please be cautious with the products included in the subscription, they should match exactly the products included in the plan. You can inspect them\n \n here\n \n .\n \n \n You don't need to add the metered products on a new subscription; Datacoves will modify the subscription and add them later. Unless the customer prepaid for developer seats and services, you include the developer seat product specifying the total user licences and then one product line per service (airbyte, airflow, superset, datahub).\nIn the following example, there are 5 user licences, 1 Airbyte, 1 Airflow and 1 Superset server:\n \n \n \n \n \n NOTE: Certain customers (like Guitar Center) could opt to prepay the developer seats and services costs via Bank transfer / check. In those cases, you only include the metered products in the subscription.\n \n \n Don't forget to set the right plan on the subscription metadata. It's usually\n \n growth-monthly\n \n ; if you need a different one, type the\n \n slug\n \n field of the chosen one.\n \n \n \n \n \n On Payment, select\n \n Email invoice to the customer to pay manually\n \n and uncheck\n \n Include a stripe-hosted link to an invoice payment page in the invoice\n \n .\n \n \n Finalize by clicking on\n \n Create subscription\n \n .\n \n \n Go to the Django admin panel and check that the account has a\n \n JSON\n \n dict on the field\n \n subscription\n \n .
If it does, it means the connection is set, you can now finalize the trial by setting a past end date in the\n \n Trial ends at\n \n field (or by just removing trial start and end dates).\n \n \n \n \n Add credit to customer\n \n \n Once the subscription was created, the customer will start generating a debt.\nAs soon as Datacoves receives a check or wire, a Stripe Admin needs to register it on the Django Admin, as follows:\nNote that credits have a validity period, during that period the developer licences or services specified will be discounted from the invoice.\n \n \n \n Go to Accounts, select the customer's account and edit it.\n \n \n Scroll down until you see the\n \n Credits\n \n area.\n \n \n Click on\n \n Add another credit\n \n and complete the required fields including as much information as possible in the reference field.\n \n \n Click on\n \n Save\n \n .\n \n \n \n \n \n \n \n \n F.A.Q.\n \n \n \n \n How do I configure my local environment to test Stripe?\n \n \n First of all, you need to set to\n \n True\n \n the feature\n \n accounts_signup\n \n on the only record you have in the\n \n Cluster\n \n model.\n \n \n Then, if you're using\n \n datacoveslocal.com\n \n and you were granted permissions automatically to the\n \n local\n \n account, you need \nto remove all the permissions to such account, doing that the Datacoves UI will allow you creating a new account using the\nsetup wizard.\n \n \n You should also set\n \n setup enabled\n \n on\n \n True\n \n on the admin panel for you user.\n \n \n Then, navigate to https://datacoveslocal.com/account-setup/ and follow the instructions to create an account using Stripe.\n \n \n \n \n How do I run the stripe webhooks locally to test billing integration?\n \n \n Run\n \n ./cli.py stripe_webhooks\n \n and follow the instructions.\n \n \n \n \n How to sync stripe live products with test products?\n \n \n Sometimes you modified the live products (prices/descriptions) and you need to update the test ones.\n \n \n Just run\n \n ./cli.py copy_to_stripe_test\n \n to run the live -> test sync process."},"23":{"url":"/docs_output/how-tos/celery-monitoring.html","snip":"Edit on github\n \n \n \n \n \n \n Celery monitoring\n \n \n For authoritative, more detailed information, see\n \n celery's monitoring guide\n \n .\n \n \n \n \n UI\n \n \n We run the flower UI at\n \n https://flower.{cluster_domain}\n \n . You can see executed\ntasks by clicking on tasks, or navigating to\n \n https://flower.{cluster_domain}/tasks\n \n .\nYou'll want to sort tasks to see the latest Started or Received at the top.\nYou can filter by task using the Search input. The UI doesn't refresh live.\nIncreasing the number of shown entries can be helpful.\n \n \n \n \n CLI\n \n \n From a core-api pod (\n \n kcc exec -it $api_pod_name -- bash\n \n ) you can invoke\ncelery inspect. 
One useful thing to do is check the stats.\n \n celery -A datacoves inspect stats\n\n \n Here's an excerpt from the output.\n \n ...\n \"total\": {\n \"billing.tasks.inform_billing_events\": 113,\n \"billing.tasks.tally_account_resource_usage\": 1,\n \"billing.tasks.tally_resource_usage\": 1,\n \"celery.backend_cleanup\": 1,\n \"clusters.workspace.sync_task\": 1211,\n \"iam.tasks.clear_tokens\": 1,\n \"iam.tasks.remove_missing_user_groups\": 1,\n \"notifications.tasks.send_slack_notification\": 7,\n \"projects.tasks.delete_unused_project_keys\": 1,\n \"projects.tasks.remove_unused_environments\": 1,\n \"projects.tasks.remove_unused_user_volumes\": 1,\n \"projects.tasks.stop_sharing_codeservers\": 38,\n \"projects.tasks.turn_off_unused_workspaces\": 1134\n },\n \"uptime\": 68132\n...\n\n \n The uptime is 68132 seconds, and the sync_task has run 1211 times, so there's\nbeen one run every 56 seconds in average."},"24":{"url":"/docs_output/how-tos/choose-ec2-nodes.html","snip":"Edit on github\n \n \n \n \n \n \n Choosing an ec2 instance type and estimating pods per node\n \n \n \n AWS docs.\n \n \n \n \n \n Pod limit from network constraints\n \n \n Every pod must have an IP. EC2 instances have a maximum number of IPs, which\nlimits the number of pods per node.\n \n source\n \n \n \n With CNI version 1.9 or higher and nitro instances,\n \n the pod limit can be increased\n \n .\nFor example:\n \n $ ./max-pods-calculator.sh --instance-type m5.large --cni-version 1.9.0\n29\n\n $ ./max-pods-calculator.sh --instance-type m5.large --cni-version 1.9.0 --cni-prefix-delegation-enabled\n110\n\n# For ensembledev.apps.jnj.com:\n$ ./max-pods-calculator.sh --instance-type m5.4xlarge --cni-version 1.7.1\n110\n\n \n \n List of ENI and IP limits per instance type\n \n .\n \n \n \n \n Pod limit from volume attachment limits\n \n \n Currently some of our pods (code-server) require ELB volumes. EC2 instances have\na maximum number of volumes that can be attached. For \"most\" nitro instances, the\nsum of ENIs, volume attachments and instance store volumes must be less than 28.\n \n source\n \n . 
Volume attachments seem capped at 26 because each mount point uses a letter of the alphabet."},"25":{"url":"/docs_output/how-tos/codeserver-images.html","snip":"Check the versions, but these are the standard Datacoves VS Code extensions:\n \n SQLFluff is a SQL linter with dbt support\nhttps://datacoves-vs-code-images.s3.amazonaws.com/dorzey.vscode-sqlfluff-3.2.0.vsix\n \n This extension adds grid (Excel-like) editing for CSV files\nhttps://datacoves-vs-code-images.s3.amazonaws.com/janisdd.vscode-edit-csv-0.10.0.vsix\n \n Standard VS Code Python extension\nhttps://datacoves-vs-code-images.s3.amazonaws.com/ms-python.python-2024.14.1.vsix\n \n This adds yml validations\nhttps://datacoves-vs-code-images.s3.amazonaws.com/redhat.vscode-yaml-1.15.0.vsix\n \n This adds \"shortcuts\" to VS Code like the \"run current model\" and \"more..\" buttons\nhttps://datacoves-vs-code-images.s3.amazonaws.com/RobertOstermann.better-status-bar-1.0.9.vsix\n \n This adds Jinja support, I think it is dbt-jinja\nhttps://datacoves-vs-code-images.s3.amazonaws.com/samuelcolvin.jinjahtml-0.20.0.vsix\n \n This adds items to the file context menu like \"Duplicate\"\nhttps://datacoves-vs-code-images.s3.amazonaws.com/sleistner.vscode-fileutils-3.10.3.vsix\n \n This adds spell checking\nhttps://datacoves-vs-code-images.s3.amazonaws.com/streetsidesoftware.code-spell-checker-3.0.1.vsix\n \n This is our Power User extension that adds things like query preview and near real time linting\nhttps://datacoves-vs-code-images.s3.amazonaws.com/vscode-datacoves-power-user-0.9.16.vsix\n \n Python Ruff linter; its main use case is to show vars and imports not being used in a .py file\nhttps://datacoves-vs-code-images.s3.amazonaws.com/charliermarsh.ruff-2024.56.0.vsix\n \n This adds colors to each column of a CSV file\nhttps://datacoves-vs-code-images.s3.amazonaws.com/mechatroner.rainbow-csv-3.3.0.vsix\n \n This is part of the Datacoves install for Snowflake envs\nhttps://datacoves-vs-code-images.s3.amazonaws.com/snowflake.snowflake-vsc-1.10.5.vsix\n \n SQLTools: I can't find where this one came from on GitHub, and it is no longer in Orrum since I deleted it.\nIt is used on non-Snowflake envs like Artemis\n \n This is a ChatGPT extension that is NOT our default, but has been added in a few places, like orrum and cold bore.
Datacoves co-pilot will make this obsolete\nhttps://datacoves-vs-code-images.s3.amazonaws.com/timkmecl.chatgpt-1.1.2.vsix"},"26":{"url":"/docs_output/how-tos/connect-to-kenvue-cluster-using-a-bastion.html","snip":"Edit on github\n \n \n \n \n \n \n How to connect to kenvue cluster using a bastion\n \n \n \n \n SSH to bastion\n \n \n ssh\n \n @AWSWEXNVAL0001.kenvue.com\n \n \n \n \n \n Set up your user enviornment\n \n \n Install kubectl and aws-iam-authenticator\n \n mkdir bin\ncd bin\ncurl -Lo aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.5.9/aws-iam-authenticator_0.5.9_linux_amd64\nchmod +x aws-iam-authenticator\n\ncd ..\ncurl -Lo kuberlr.tar.gz https://github.com/flavio/kuberlr/releases/download/v0.4.2/kuberlr_0.4.2_linux_amd64.tar.gz\ntar -xzvf kuberlr.tar.gz\n\ncd kuberlr_0.4.2_linux_amd64/\nmv kuberlr ../bin/\ncd ../bin\nln -s kuberlr kubectl\ncd ..\n\n \n \n \n Configure your ~/.kube/config\n \n mkdir .kube\ncat << EoF > .kube/config2\napiVersion: v1\nclusters:\n- cluster:\n server: https://BD0F1A58014FCF446B668A876EE7DF2A.gr7.us-east-1.eks.amazonaws.com\n certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1UQXlOVEV4TlRNMU1Gb1hEVE15TVRBeU1qRXhOVE0xTUZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT2JpCmFhOUFvSDVlWGpMeFdnQzBONE5JUHVQSVptNmpLNmxBM29sTVAwUHYyd1hlalphcEFsVnFOWVdxcHl3aCtZZm8KT1lLR1Nuc2hPdE9DbnVyU094SVhoY1BnR1ZmN1REVlZGbU04WW5KSzBmOHdLWmxLdDNIYU9oWFJkekNZYkJoMgoydnpZSGx0ZGREbHkvTHpwaWpNQlpNRHY1UUtkeEhNSEF0aUd6aG4xS2xvT2xkRGozV1lpV1VJV0ladzZheWV2CnNhYm1Rd3A1REJwQjBVN3V2bEdMd1RUQ3RZc3NhdnI2dDZ6MWtzNHhNUUMxVTlONUlHV0UxdEUrZGZwMmZzWDYKZ3d1c0tEOGNESkFiVmFrL2lwK3pkcXRxRnJHOVFNeDBEelpQYzRtU1dnVDZyVXZjbTlBbTlrMVNsSXc5ODlGRApHelh6bGxQcXZySWNnU1RWSW9jQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZLNnJEeXBRK3VReGgxWU8zS0JKbmthYU1TNUdNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFCdk52clZjRjFaZ1FDMzNpbDZrR0gzcHJJN3RWRmcvOTF3UVNZZkM2SFM2cWRiVERucwpNYXhoeEYvblZzbFEyKzRmN0UxVUZodUdsOUdUZlVvS2FiQzB1cWx6bUpQaDJVUXJRZ3hZQnd3eGxTOSszcHJNCnlUOGZ5M29uM21jaWR0azZlSllIcm5wZS9QZnlWN1J5eUhva0pVVGIwcWFVakxoMVZHVFoyRmJLK0ZjeG50SHcKdWJ4bnlSMHZlcGExdDFoOVljNDFJYnFzUGRBMVFDZVYvR1hNdWN4Z0U4bUd1VFZQQlU1MEdYbG1qWnRZVjg5dgp3TVpYTVVobzNmakdQNVVnMnlFTmtXaW9Ra2hqUkRMRUZGQXpZUzMrSU5TWnAwMklBUTRRNkNSYnJ0Vmc5ZDFrCkY4d1FzaytJUXUrMnE3T25WOUs5cUdYeXdrakNSd0ZTV1N2UwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==\n name: kubernetes\ncontexts:\n- context:\n cluster: kubernetes\n user: aws\n name: aws\n- context:\n cluster: kubernetes\n user: aduser\n name: user\ncurrent-context: aws\nkind: Config\npreferences: {}\nusers:\n- name: aws\n user:\n exec:\n apiVersion: client.authentication.k8s.io/v1beta1\n command: aws-iam-authenticator\n args:\n - \"token\"\n - \"-i\"\n - \"itx-wcr-datacove-development\"\n - \"-r\"\n - \"arn:aws:iam::551241293703:role/itx/service/EKS/VPCxEKSRole\"\n- name: aduser\n user:\n auth-provider:\n config:\n apiserver-id: \"22f9d484-b818-4b21-a278-00b264446505\"\n client-id: \"22f9d484-b818-4b21-a278-00b264446505\"\n environment: AzurePublicCloud\n tenant-id: \"7ba64ac2-8a2b-417e-9b8f-fcf8238f2a56\"\n name: azure\nEoF\n\n \n \n \n Connect to cluster\n \n kubectl get nodes"},"27":{"url":"/docs_output/how-tos/custom-dns.html","snip":"Edit on github\n \n \n \n \n \n \n About this Documentation\n \n \n Some customers (like Orrum) require a custom 
internal DNS. This will require adding a new coredns custom config map:\n \n apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: coredns-custom\n namespace: kube-system\ndata:\n sftp.orrum.com.server: |\n sftp.orrum.com:53 {\n forward . 172.31.150.10 172.31.160.20\n }\n\n \n Change 'sftp.orrum.com' to whatever pattern needs to go to the custom DNS, and the IP addresses to the addresses of the DNS servers that resolve it.\n \n Then you can patch the coredns deployment:\n \n kubectl -n kube-system patch deployment coredns \\\n --type='json' \\\n -p='[\n {\n \"op\": \"add\",\n \"path\": \"/spec/template/spec/volumes/-\",\n \"value\": {\n \"name\": \"custom-coredns\",\n \"configMap\": {\n \"name\": \"coredns-custom\"\n }\n }\n },\n {\n \"op\": \"add\",\n \"path\": \"/spec/template/spec/containers/0/volumeMounts/-\",\n \"value\": {\n \"name\": \"custom-coredns\",\n \"mountPath\": \"/etc/coredns/custom\"\n }\n }\n ]'\n\n \n Then restart the deployment:\n \n kubectl rollout restart deployment coredns -n kube-system\n\n \n And test with nslookup:\n \n kubectl -n core exec -ti workbench-c6599969b-k4p5w -- nslookup sftp.orrum.com"},"28":{"url":"/docs_output/how-tos/datacoves-versioning.html","snip":"Datacoves versioning\n \n We use\n semantic versioning\n in all our images and datacoves releases.\n \n MAJOR.MINOR.PATCH\n \n where\n MAJOR.MINOR\n are read from\n .version.yaml\n and used every time a new image is\npushed to the docker repository, and\n PATCH\n is autogenerated (timestamp).\n \n Our criteria\n \n When do we bump the\n MAJOR\n version?\n \n When we make incompatible changes or we introduce compatible changes but deprecate features:\n \n Any python library upgrade (including dbt) that requires changes in the customer's analytics (dbt) git repo\n \n Airbyte, Airflow, DataHub, Superset upgrades that require reconfiguration\n \n Datacoves core changes that require human intervention\n \n Airbyte, Airflow, DataHub, Superset upgrades that do not require reconfiguration, but where several features are being deprecated\n \n When should we bump the\n MINOR\n version?\n \n When we make compatible changes, such as new features or upgraded dependencies\n \n Patch version changes to dbt, e.g. 1.8.3 to 1.8.5\n \n Compatible updates to dbt, e.g. 1.7.x to 1.8.x\n \n Compatible updates to Airbyte, Airflow, DataHub, Superset that do not require reconfiguration\n \n Everything else is a\n PATCH\n \n Bug fixes, performance enhancements\n \n Image tags\n \n Images are pushed with the following tags:\n \n MAJOR\n \n MAJOR.MINOR\n \n MAJOR.MINOR.PATCH\n \n MAJOR.MINOR.PATCH-\<commit sha>\n \n latest\n \n CI servers that eventually use datacoves images could reference any of them, depending on how specific they need to be.\n \n Releases\n \n Releases follow the same versioning criteria; they are generated by running the\n ./cli.py generate_release\n command, or by triggering the\n Generate Release\n GitHub workflow."},"29":{"url":"/docs_output/how-tos/debug-airflow-workers.html","snip":"Debug Airflow Workers\n \n How to review if there are errors in git-sync/s3-sync containers?\n \n We have already enabled the functionality in\n git-sync\n to retry a maximum of three times.
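If you have kubectl access to the cluster, a quick alternative sketch for pulling the same container logs directly (the namespace follows the document's `dcw-dnr240` example; the pod name is a hypothetical placeholder):

```bash
# Find the worker pod that ran the task, then read its sync sidecar logs
kubectl -n dcw-dnr240 get pods | grep airflow
kubectl -n dcw-dnr240 logs <worker-pod-name> -c git-sync --tail=100
kubectl -n dcw-dnr240 logs <worker-pod-name> -c s3-sync --tail=100
```

The Grafana/Loki queries described below remain the better option once the worker pod has already been deleted.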
If the synchronization with\n git-sync\n or\n s3-sync\n is not successful, the worker will fail and therefore the Airflow task will also fail.\n \n To get the logs from\n git-sync\n or\n s3-sync\n we need to filter by namespace and by container. Below are some examples of how to do it.\n \n Go to\n Grafana\n , e.g.\n https://grafana.<domain>\n \n Go to\n Explore\n , select the\n Loki\n datasource and perform the query with the following filters:\n \n Namespace\n =\n dcw-my-slug-environment\n \n Container\n =~\n git-sync\n /\n s3-sync\n \n Examples:\n \n # git-sync\n{namespace=\"dcw-dnr240\", container=\"git-sync\"} |= `` \n\n# s3-sync\n{namespace=\"dcw-dnr240\", container=\"s3-sync\"} |= `` \n\n \n How to get Airflow workers?\n \n Go to\n Grafana\n , e.g.\n https://grafana.<domain>\n \n Go to\n Explore\n , select the\n Loki\n datasource and perform the query with the following filters:\n \n Namespace\n =\n dcw-my-slug-environment\n \n Pod\n =~\n my-slug-environment-airflow-scheduler.*\n \n Line contains\n |=\n my-task\n \n Note: Remember that you have to adjust the date and time parameters depending on the search you want to perform.\n \n E.g.:\n \n {namespace=\"dcw-prd001\", pod=~\"prd001-airflow-scheduler.*\"} |= `t_id_MDM_extraction_V_ENS2_SALES_ADJUSTMENTS_streamsets`\n\n \n Copy the pod name\n \n Go to\n Explore\n , select\n Loki\n and perform the query with the following filters:\n \n Namespace\n =\n dcw-my-slug-environment\n \n Pod\n =\n pod-name\n \n E.g.:\n \n {namespace=\"dcw-prd001\", pod=\"emeaelmdmprdtidmdmextractionve-295567f106ff46139ad4edf24e52fc31\"} |= ``"},"30":{"url":"/docs_output/how-tos/debug-dbt-errors-when-return-code-is-not-zero.html","snip":"How to debug dbt on production environments, i.e.
Airflow?\n \n \n Sometimes when you run a dbt command on the command line, i.e.\n \n dbt deps\n \n ,\n \n dbt compile\n \n , there are silent errors, and you just got an errorcode > 0.\n \n \n To debug it, you should run it programatically using python:\n \n \n \n \n Run python in the command line\n \n $ python\n\n \n \n \n Run the desired command right in the python console\n \n from dbt.cli.main import dbtRunner, dbtRunnerResult\n\n# initialize\ndbt = dbtRunner()\n\n# create CLI args as a list of strings\ncli_args = [\"deps\"]\n\n# run the command\nres: dbtRunnerResult = dbt.invoke(cli_args)\n\n# inspect the results\nfor r in res.result:\n print(f\"{r.node.name}: {r.status}\")\n\n \n To know more, see https://docs.getdbt.com/reference/programmatic-invocations."},"31":{"url":"/docs_output/how-tos/grafana-grant-permisions.html","snip":"Edit on github\n \n \n \n \n \n \n Grant permissions to Grafana\n \n \n \n Go to\n \n Django admin groups\n \n .\n \n \n Edit a group that has your user.\n \n \n Search\n \n Grafana\n \n permissions and\n \n Choose all\n \n (See image).\n \n \n Save the group.\n \n \n Go to\n \n Grafana"},"32":{"url":"/docs_output/how-tos/grafana-loki-storage-config-providers.html","snip":"Edit on github\n \n \n \n \n \n \n Grafana Loki Storage\n \n \n \n \n Providers\n \n \n \n \n AWS S3\n \n \n \n \n Azure Blob Storage\n \n \n \n \n \n \n AWS S3\n \n \n \n \n Permission\n \n \n Limited: List, Read, Write\n \n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:PutObject\",\n \"s3:GetObject\",\n \"s3:GetObjectVersion\",\n \"s3:DeleteObject\",\n \"s3:DeleteObjectVersion\"\n ],\n \"Resource\": \".../*\"\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": \"s3:ListBucket\",\n \"Resource\": \"...\"\n }\n ]\n}\n\n \n \n \n Create and Configure Life Cycle\n \n \n \n Find and select\n \n S3 Services\n \n .\n \n \n Click on\n \n Create Bucket\n \n .\n \n \n \n General configuration\n \n you must choose the region y the name of bucket. 
Other values can be defaulted.\n \n We need to create two\n Lifecycle rules\n to rotate our logs.\n \n Select the new bucket and then select the\n Management\n tab.\n \n Click\n Create lifecycle rule\n .\n \n Under\n Lifecycle rule configuration\n fill in the name, e.g.:\n Delete all fake objects after 30 days\n .\n \n Under\n Prefix\n fill in\n fake/\n \n Under\n Lifecycle rule actions\n check\n Expire current versions of objects\n .\n \n Under\n Days after object creation\n enter\n 30\n \n Save changes.\n \n Under\n Lifecycle rule configuration\n fill in the name, e.g.:\n Delete all index objects after 30 days\n \n Under\n Prefix\n fill in\n index/\n \n Under\n Lifecycle rule actions\n check\n Expire current versions of objects\n .\n \n Under\n Days after object creation\n enter\n 30\n \n Save changes.\n \n We must have two rules.\n \n Example of a rule.\n \n Azure Blob Storage\n \n Create and configure Azure Blob Storage\n \n Create a new\n Storage account\n resource.\n \n Select your\n Subscription\n and\n Resource group\n .\n \n Complete the\n Storage account name\n .\n \n Click\n Review\n (other values can be defaulted).\n \n Click\n Create\n (other values can be defaulted).\n \n Select your new\n Storage account\n .\n \n Click on\n Containers\n and add a new container.\n \n Select\n Lifecycle management\n and\n Add a rule\n to create a new rule to rotate our logs.\n \n On the\n Details\n tab complete the name (Delete all objects after 30 days) and select\n Limit blobs with filter\n .\n \n On the\n Filter set\n tab add two\n Blob prefix\n entries:\n <container-name>/fake/\n and\n <container-name>/index/\n .\n \n Click\n Create\n \n Get configuration data\n \n Account name\n is the name of the\n storage account\n .\n \n Click on\n Account key\n (Key1)\n \n Select your\n Container\n and then\n Properties"},"33":{"url":"/docs_output/how-tos/grafana-loki-storage-config.html","snip":"Grafana Loki Storage Configuration\n \n There are three different providers to configure\n Loki\n storage:\n \n AWS S3\n \n Azure Blob Storage\n \n Minio (Local development)\n \n Notes\n \n Minio is not responsible for log rotation; the log lifecycle must be configured in your provider.\n \n How to configure the provider?\n here\n \n To configure the cluster you must add the configuration to the configuration repository as a secret in\n <domain>/cluster-params.secret.yaml\n , for example for our local environment\n datacoveslocal.com/cluster-params.secret.yaml\n \n Minio (Local development)\n grafana:\n ...\n loki:\n provider: minio\n password: ...\n\n \n AWS S3\n grafana:\n ...\n loki:\n provider: aws\n region: <us-east-1>\n access_key: ...\n secret_key: ...\n bucket: <bucket-name>\n\n \n Azure Blob Storage\n grafana:\n ...\n loki:\n provider: azure\n account_name: ...\n account_key: ...\n container_name: <container-name>\n endpoint_suffix:
<blob.core.windows.net>"},"34":{"url":"/docs_output/how-tos/hotfix.html","snip":"Edit on github\n \n \n \n \n \n \n How to Create a Hotfix\n \n \n A hotfix is defined as doing a targetted fix to an existing release. The idea behind a hotfix is to do the absolute minimum change to correct a high priority issue in a live release.\n \n \n To create a hotfix, one must first do the fix. First, create a branch from the release tag you wish to hot fix. Let's say you're hot-fixing release 'TAG_NAME'. You would do the following commands:\n \n git fetch --all --tags\ngit checkout -b BRANCH_NAME refs/tags/TAG_NAME\n\n \n You will now have a branch that is a copy of the release tag. You can either do your hotfix work directly on that branch and merge it to main later, or you can use\n \n git cherry-pick\n \n to pick commits from the main branch onto your new branch. If you need to use cherrypick and you don't know how, that is a larger topic than I want to cover here; Stephen can help you directly with that.\n \n \n Once you have done your work, you should\n \n commit\n \n to your branch and then compare your branch to the original tag. This will make sure you only changed what was needed:\n \n git diff BRANCH_NAME..refs/tags/TAG_NAME\n\n \n This command\n \n is very important if you cherry-pick\n \n to make sure you don't accidentally bring additional features or code that you do not intend to. However, it is good practice to review all code going into a hotfix very carefully.\n \n \n Once you are certain your hotfix is good,\n \n push\n \n it to the git repository. Now you're ready to build a hotfix release with cli.py. Do the following command:\n \n ./cli.py generate_hotfix\n\n \n It will first show you\n \n git status\n \n to make sure your code is committed. Make sure there are no extra files or anything you don't want built into the release docker image present in your code tree.\n \n \n After you confirm, it will ask you which release you are making a hotfix from. This release must already be present in your\n \n releases/\n \n directory; if it is not, download the release with\n \n ./cli.py download_releases\n \n or download the appropriate manifest directly from github.\n \n \n Then, it will ask you which images you wish to build. Select one or more images to build, or none if you are changing another dependency.\n \n \n After that, it will ask you if you want to change the version of any other image that is in the release. You can select none if you only want to build new images and you don't need to change any other dependencies.\n \n \n Finally, it will build your release and push it up as a draft in github. 
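Looking back at the branch-comparison step above, a related sketch (hypothetical tag and branch names): listing only the commits that sit on top of the release tag is a quick complement to the full diff.

```bash
# Commits present on the hotfix branch but not in the original release tag
git log --oneline refs/tags/TAG_NAME..BRANCH_NAME

# The same range as a diffstat, to spot unintended file changes at a glance
git diff --stat refs/tags/TAG_NAME..BRANCH_NAME
```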
From that point, it is a normal release and you can take it through the normal process to get it installed."},"35":{"url":"/docs_output/how-tos/how-to-create-a-ssl-certificate.html","snip":"How to create an SSL certificate\n \n Install\n acme.sh\n \n Configure the\n cloudflare API token\n (getting\n CF_Key\n and\n CF_Email\n from 1Password).\n \n Run:\n \n # Let's Encrypt issuer\n# https://github.com/acmesh-official/acme.sh/wiki/Server\nacme.sh --issue --server letsencrypt --dns dns_cf -d <DOMAIN> --debug 2\n\n# then\nacme.sh --issue --server letsencrypt --dns dns_cf -d '*.<DOMAIN>' --debug 2\n\n \n Get certificate information (optional)\n \n openssl x509 -text -noout -in <cert>\n\n \n Copy certificates\n \n Use\n <DOMAIN>/fullchain.cer\n and\n <DOMAIN>/<DOMAIN>.key\n as the root certificate and private key. They are usually copied to\n base/root.cer\n and\n base/root.key\n .\n \n Also, use\n *.<DOMAIN>/fullchain.cer\n and\n *.<DOMAIN>/<DOMAIN>.key\n as the wildcard certificate and private key. They are usually copied to\n base/wildcard.cer\n and\n base/wildcard.key\n ."},"36":{"url":"/docs_output/how-tos/","snip":"Edit on github"},"37":{"url":"/docs_output/how-tos/install-python-reqs-on-jnj-bastion.html","snip":"Install python requirements on the bastion in JNJ\n wget --no-check-certificate https://bootstrap.pypa.io/pip/3.6/get-pip.py && python3 get-pip.py --user\n\n \n Then, cd into the datacoves_deployment cloned repo folder, and run:\n \n pip install -r requirements.txt"},"38":{"url":"/docs_output/how-tos/list-code-server-pods-processes.html","snip":"List python processes running on a certain namespace's code server pods\n #!/bin/bash\nns=\"dcw-dev001\"\npods=$(kubectl -n $ns get pods | grep code-server | awk '{print $1}')\nfor pod in $pods; do\n kubectl -n $ns exec -ti $pod -- bash -c 'ps auxwf' | grep python\ndone"},"39":{"url":"/docs_output/how-tos/make-and-install-a-release.html","snip":"Make a new release\n \n To make a new release, from your development machine:\n cluster_domain=ensembletest.apps.jnj.com\n\n# Generate a new release.\ngit checkout main\ngit pull\n\n# Check that images are properly created in Github Actions\n./cli.py generate_release\nrelease= # The name of the release just generated.\n\n# [If release is targeted to a submodule customer]\n# Check if there's any config change requirement\n./cli.py combined_release_notes # Inspect the output to check for configuration changes\n\n# Update the cluster configuration to reference the new release.\n./cli.py set_release\ncd config/$cluster_domain/\ngit secret reveal -f # Only required if you modified secrets.\nchange configuration as required # Only required if you modified secrets.\ngit secret hide # Only required if you modified secrets.\ngit add -A\ngit diff --cached # Review what will be committed.\ngit commit\ngit push\n\n# Commit and push the changes to datacoves.\ncd ../..\ngit add -A\ngit diff --cached\ngit commit\ngit push\n\n \n Apply the release to a cluster\n \n Localhost\n ./cli.py install\n\n \n JNJ\n \n For jnj there's a git repository, datacoves_deployment, that mirrors the structure of\nthe datacoves repo but only contains scripts and configuration, not sources.\n \n To deploy, first update the mirror:
# Clone if needed.\nmkdir -p ../jnj/asx-ahrx/datacoves_deployment\ngit clone ssh://git@sourcecode.jnj.com:3268/asx-ahrx/datacoves_deployment.git ../jnj/asx-ahrx/datacoves_deployment\n\n# Rsync the installer files into the datacoves_deployment repo\n./cli.py rsync_installer ../jnj/asx-ahrx/datacoves_deployment/\n\n# Point the config submodule to the latest version.\ncd config/$cluster_domain/\ngit pull\ncd ../..\n\n# Commit the changes.\ngit add -A\ngit diff --cached\ngit commit\n\n \n SSH into a jnj machine with kubectl access to the cluster. Then follow\n \n datacoves_deployment\n \n 's\n \n documentation\n \n to run the installation scripts."},"40":{"url":"/docs_output/how-tos/manage-profiles-and-image-sets.html","snip":"Edit on github\n \n \n \n \n \n \n Managing profiles and image sets\n \n \n \n \n How to create and use a profile + image set?\n \n \n \n \n 1. Create profile\n \n \n A profile is used to create a reusable preconfigured environment.\n \n \n \n Navigate to\n \n profiles admin page\n \n and create a new one clicking on \"Add Profile\".\n \n \n Review the checkboxes and uncheck the ones that are not appropiate, you might like to keep them all checked as suggested.\n \n \n Add profile files accordingly. You might like to copy the exact same profile files configured on the\n \n default profile\n \n .\n \n \n \n \n \n 2. Create image set\n \n \n Image sets are associated to profiles and they are used to build the images that will end up being used by code-server and/or airflow.\n \n \n \n Navigate to the\n \n Image set admin page\n \n and click on \"Create new image set\".\n \n \n Choose the profile you just created in\n \n Profile\n \n .\n \n \n Choose the release from where the new images are going to be based on, typically the last release.\n \n \n Set the common python requirements for both airflow and code-server images in the\n \n Python requirements\n \n field. Take a look at the help text under the field.\n \n \n Set the specific python requirements for airflow or code server in the fields\n \n Airflow requirements\n \n or\n \n Code server requirements\n \n .\n \n \n Finally, configure the extensions you need installed in code-server by adding urls to the .vsix files in the\n \n Code server extensions\n \n field.\n \n \n Hit \"Save and continue editing\".\n \n \n Click on \"Build image set\" button in the top right corner of the form. A background process will be triggered to build the images.\n \n \n Keep refreshing the page every 1 minute until the field\n \n Images\n \n get populated with the final images cooked.\n \n \n \n \n \n 3. Start using you profile\n \n \n Once you profile and image set are ready, you need to edit the environment you want to change and set the corresponding\n \n profile\n \n in such field. Environments are edited\n \n here\n \n .\n \n \n \n \n \n \n \n 4. Reload the workbench page\n \n \n That's all, reload the page and don't forget to prepare your\n \n mate\n \n to enjoy your analytics journey even more ;)"},"41":{"url":"/docs_output/how-tos/move-a-gpg-secret-key.html","snip":"Edit on github\n \n \n \n \n \n \n How to move a gpg secret key\n \n \n You should not reuse private gpg keys without thinking. 
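If you decide a fresh key is more appropriate than reusing one, a minimal sketch (the email address is just the example identity used in this guide):

```bash
# Generate a new key non-interactively with the default algorithm and no expiry
gpg --quick-generate-key "youremail@its.jnj.com" default default never
gpg --list-secret-keys
```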
However, it is more\nconvenient to have a single private key for your jnj email that is in all the\ngit secret keyrings of all the cluster config repos that you have access to.\n \n \n An easy way to transfer a key to a new installation server is to copy and paste\nits base64:\n \n # From the machine that already has the key:\ngpg --list-secret-keys\ngpg --export-secret-key youremail@its.jnj.com | base64\n# Copy the output.\n\n # From the installation machine:\ncat | base64 -d > key.asc\n# Paste and hit control D.\ngpg --import key.asc\ngpg --list-secret-keys\nrm key.asc"},"42":{"url":"/docs_output/how-tos/onboard-a-new-project-on-datacoves.html","snip":"Edit on github\n \n \n \n \n \n \n 1. Create service accounts on snowflake (manually).\n \n \n \n svc_datacoves: to change user private key\n \n \n svc_orchestration: airflow jobs\n \n \n svc_loader: airbyte/fivetran jobs\n \n \n svc_continuous_integration: CI jobs\n \n \n svc_business_intelligence: BI tool connection (optional)\n \n \n svc_business_intelligence_pii: BI tool connection for PII data (optional)\n \n \n \n \n \n 2. Create user accounts on snowflake (manually)\n \n \n \n \n 3. New project on appdevtools (on JnJ):\n \n \n \n Bitbucket\n \n \n Jenkins\n \n \n Confluence\n \n \n \n \n \n 4. Configure git service account access to repo\n \n \n \n \n 5. Add SQL hook and template to set users private key on snowflake\n \n \n \n \n 6. Create git repo structure using balboa repo as a reference:\n \n \n \n load\n \n \n orchestrate\n \n \n automate\n \n \n dbt\n \n \n profiles.yml\n \n \n sample_blue_green.py\n \n \n docs\n \n \n secure\n \n \n .gitignore\n \n \n \n Depending on CI:\n \n \n \n .github\n \n \n .gitlab-ci.yml\n \n \n Jenkinsfile\n \n \n \n CI job deploy to prod that:\n \n \n \n generate dbt docs on dbt-docs branch\n \n \n runs dbt build on prod\n CI job on PR that:\n \n \n validate branch names\n \n \n run pre-commit hooks\n \n \n \n \n \n 7. Add airbyte connection on airflow\n \n \n \n \n 8. Add new branch “airflow_\n \n ” for every env that is not\n \n production\n \n \n \n \n \n \n 9. New dbt-docs branch\n \n \n \n \n 10. Jenkins configuration\n \n \n \n Git SA\n \n \n Snowflake SA\n \n \n \n \n \n 11. 
Enable dbt-docs once index.html was placed on the dbt-docs branch"},"43":{"url":"/docs_output/how-tos/prometheus-queries.html","snip":"Useful prometheus queries\n \n node status with pressure\n sum by(node) (kube_node_status_condition{status=\"true\", condition=\"DiskPressure\"}) +\nsum by(node) (kube_node_status_condition{status=\"true\", condition=\"MemoryPressure\"}) +\nsum by(node) (kube_node_status_condition{status=\"true\", condition=\"PIDPressure\"})\n\n \n pods memory filtering by pod name with regex\n sum by(pod) (container_memory_usage_bytes{namespace=\"<NAMESPACE>\", pod=~\"<PREFIX>.*\"})\n\n \n containers cpu usage by node\n sum by(node) (rate(container_cpu_usage_seconds_total{node=\"<NODE>\"}[5m]))\n\n \n Node memory\n node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100\n\n \n Loki ingester chunk stored size\n loki_ingester_chunk_stored_bytes_total{job=\"loki\"}\n\n \n Pods killed because they exceeded the memory limit\n sum by(pod) (kube_pod_container_status_terminated_reason{reason=\"OOMKilled\", namespace=\"dcw-prd001\"})\n\n \n Total worker nodes (measured by nodes running airflow worker pods)\n count (sum by (node) (kube_pod_info and on (pod) kube_pod_labels{label_airflow_worker!=\"\"}) > 0)"},"44":{"url":"/docs_output/how-tos/q-and-a.html","snip":"Questions and Answers\n \n These are simple items that don't necessarily fit in elsewhere or need their own articles.\n \n How do I start codeserver without validating the git repository credentials?\n \n Code servers use User Repository settings, and currently User Repositories only work with SSH keys. Sometimes this is hard to deal with, e.g. if we can only use https authentication (i.e. from within J&J pulling an external repository), and we need a work-around.\n \n The workaround is simple; go to the Django panel.\n \n Pick User Repositories\n \n Pick the correct User Repository for your user and repo.\n \n Put a date and time in the \"validated at\" field and save it. So long as that isn't blank, it will allow you to start code server."},"45":{"url":"/docs_output/how-tos/recover-disk-on-aks.html","snip":"Recover disk (PV) from Azure Kubernetes Service\n \n This guide describes how to move a disk from one Kubernetes cluster to another cluster.\n More info\n \n Steps:\n \n Edit old PVC to Retain policy.\n \n Get PV name.\n \n Delete PVC to release the PV in the old cluster.\n \n Move the PV resource to the new cluster using az cli.\n \n Delete the PVC in the new cluster.\n \n Create the PV and PVC in the new cluster.\n \n Edit old PVC to Retain policy\n \n The\n persistent volumes (PV)\n created for\n code server\n have the Delete policy, which means that when a disk is unbound it is automatically deleted, therefore this policy must be changed to\n Retain\n .\n # Get the persistent volumes. E.g:\nkubectl get pv\n\n# Edit the persistent volume. E.g:\nkubectl patch pv pvc-2552cd9b-8231-409d-8b4b-a9d047415b53 -p '{\"spec\":{\"persistentVolumeReclaimPolicy\":\"Retain\"}}'\n\n \n Get PV name\n # Get the persistent volumes.
E.g:\nkubectl get pv\n\n \n \n \n Delete PVC to release the PV in the old cluster\n \n \n It is necessary to remove the\n \n persistent volume claim (PVC)\n \n to release the\n \n persistent volume (PV)\n \n .\n \n # Get the persistent volumes. E.g:\nkubectl -n dcw-dev123 get pvc\n\n# Edit the persistent volume. E.g:\nkubectl -n dcw-dev123 delete pvc code-server-bru-10-config-volume\n\n \n \n \n Move the PV resource to new cluster using az cli\n \n \n \n Get the\n \n cluster name\n \n and\n \n subcription id\n \n .\n \n \n \n \n \n \n \n Get the node resources groups. We will need the origin and destination.\n \n \n # Get the node resources group\naz aks show --resource-group <name-resource-group> --name <cluser-node> --query nodeResourceGroup -o tsv\n\n \n \n Get the id disk.\n \n \n # Get the origien node resource group. E.g:\naz disk list --resource-group <node-resouorce-group>\n\n \n \n \n \n \n Move the disk.\n \n \n az resource invoke-action --action moveResources --ids \"/subscriptions/<origin-subcription-id>/resourceGroups/<origin-node-resource-group>\" --request-body \"{ \\\"resources\\\": [\\\"<disk_id>\\\"],\\\"targetResourceGroup\\\":\\\"/subscriptions/<destination-subcription-id>/resourceGroups/<destination-node-resource-group>\\\" }\"\n\n \n \n \n Delete the PVC in the new cluster.\n \n \n This step is only necessary if the\n \n persistent volume claim (PVC)\n \n already exists.\n \n # Get the persistent volumes. E.g:\nkubectl -n dcw-dev123 get pvc\n\n# Edit the persistent volume. E.g:\nkubectl -n dcw-dev123 delete pvc code-server-bru-10-config-volume\n\n \n \n \n Create the PV and PVC in the new cluster\n \n \n Create the following file\n \n pvc.yaml\n \n with the names and namespace correct.\n \n \n \n \n pv-name\n \n : E.g:\n \n pvc-2581bfb0-b56a-4fbd-b302-67cf0ab43499\n \n \n \n \n pvc-name\n \n : If you deleted the pvc, the name should be the same. E.g:\n \n code-server-bru-10-config-volume\n \n \n \n \n namespace\n \n : Kubernetes namespace to be applied.\n \n \n \n disk-id-full-path\n \n : E.g:\n \n /subscriptions/91bd2205-0d74-42c9-86ad-41cca1b4822b/resourceGroups/MC_datacoves_east-us-a_eastus/providers/Microsoft.Compute/disks/pvc-fddcd2fc-7d35-40e9-b631-49c64bd87cbf\n \n \n \n apiVersion: v1\nkind: PersistentVolume\nmetadata:\n name: <pv-name>\nspec:\n capacity:\n storage: 20Gi\n accessModes:\n - ReadWriteOnce\n persistentVolumeReclaimPolicy: Retain\n storageClassName: default\n csi:\n driver: disk.csi.azure.com\n readOnly: false\n volumeHandle: <disk-id-full-path>\n\n---\napiVersion: v1\nkind: PersistentVolumeClaim\nmetadata:\n name: <pvc-name>\n namespace: <namespace>\nspec:\n accessModes:\n - ReadWriteOnce\n resources:\n requests:\n storage: 20Gi\n volumeName: <pv-name>\n storageClassName: default\n\n \n Create the resources in Kubernetes\n \n kubectl apply -f pvc.yaml\n\n# Check the resources\nkubectl get pvc | grep <pv-name> # pvc-2552cd9b-8231-409d-8b4b-a9d047415b53\nkubectl -n dcw-dev123 get pvc code-server-bru-10-config-volume"},"46":{"url":"/docs_output/how-tos/register-github-self-hosted-runner.html","snip":"Edit on github\n \n \n \n \n \n \n Self hosted Github Runner\n \n \n \n Create new runnner\n \n in Github\n \n . You must have\n \n Owner\n \n privileges.\n \n \n Create a virtual machine, e.g. 
in Azure, and run the scripts that Github gave you in the previous step.\n \n Install dependencies on the machine you created\n \n # Update and Upgrade\nsudo apt-get update\nsudo apt-get upgrade -y\n\n# Add Kubernetes repository and key\ncurl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -\necho \"deb https://apt.kubernetes.io/ kubernetes-xenial main\" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list\n\n# Add Helm repository and key\ncurl https://baltocdn.com/helm/signing.asc | sudo apt-key add -\necho \"deb https://baltocdn.com/helm/stable/debian/ all main\" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list\n\n# Update package list again after adding the Kubernetes and Helm repositories\nsudo apt-get update\n\n# Install software/packages\nsudo apt-get install -y apt-transport-https gnupg2 kubectl tmux python3-pip docker.io golang helm\n\n# Python symbolic link\nsudo ln -s /usr/bin/python3 /usr/bin/python\n\n# Docker post-installation step for the current user\nsudo usermod -aG docker $USER\n\n# Go and kind installation\ngo install sigs.k8s.io/kind@v0.20.0\nsudo ln -s /home/datacoves/go/bin/kind /usr/local/bin/kind\n\n \n Run\n tmux\n so the session is not closed when you detach from the ssh connection.\n \n Follow any instructions you got from Github on step 1 and install the runner as a service:\n sudo ./svc.sh install datacoves\n \n Boost inotify limits for system performance. Update the following values in the specified files:\n ```\n~$ cat /proc/sys/fs/inotify/max_user_instances\n1024\n~$ cat /proc/sys/fs/inotify/max_user_watches\n524288\n~$ cat /proc/sys/fs/inotify/max_queued_events\n16384\n```"},"47":{"url":"/docs_output/how-tos/release-notes.html","snip":"Statement of Purpose\n \n The purpose of this document is to describe the process by which we manage release notes to deliver to our customers.\n \n Source of Authority\n \n Release notes all come from Github:\n \n https://github.com/datacoves/datacoves/releases\n \n The notes begin life as auto-generated notes that are created when the release branch is built. Then, we hand-edit the release notes to match the following format:\n Breaking Changes\n* Items that are breaking changes, in list.\n\nNew Features\n* New features, in list.\n\nEnhancements\n* Enhancements to old features, in list\n\nFixes\n* Bug fixes, in list\n\nUnder the Hood\n* Notes relevant to us internally which we would like to keep, but not important to customers.\n\n**Full Changelog**: This is a URL that is provided automatically, just leave it in the change log.\n\n \n Generating Release Notes\n \n Release notes are generated per-customer and have all the changes from their current release to the latest release you currently have downloaded in your 'releases' folder.
Make sure you have the customer's cluster configuration checked out into your 'config' directory; if you do not, stop and ask for help before continuing.\n \n \n You can control which release notes are generated; make sure you have downloaded the releases first:\n \n ./cli.py download_releases\n\n \n If desired or necessary, you can delete files out of your 'releases' directory; for instance, if the customer is getting updated to the latest 2.2 series release but there are 2.3 series releases available, you could delete all the 2.3 release files out of your 'releases' directory and notes for those releases will not be produced.\n \n \n Release notes are then generated using the\n \n cli.py\n \n thusly:\n \n ./cli.py combined_release_notes\n\n \n It will make a file\n \n combined.md\n \n in the same directory as\n \n cli.py\n \n , and that will have the combined release notes for all the releases involved. This file can then be delivered to the customer as part of the announcement to upgrade them."},"48":{"url":"/docs_output/how-tos/request-access-to-a-cloud-pc-on-kenvue.html","snip":"Edit on github\n \n \n \n \n \n \n How to request access to a cloud PC on Kenvue\n \n \n \n \n Navigate to this\n \n form\n \n .\n \n \n \n \n Complete it accordingly:"},"49":{"url":"/docs_output/how-tos/reset-datahub.html","snip":"Edit on github\n \n \n \n \n \n \n Resetting Datahub\n \n \n Datahub uses PostgreSQL, ElastiCache, and Kafka. If any of these three things gets out of sync for any reason, Datahub will behave very strangely. For instance, it will claim secrets exist but not show them up in the UI.\n \n \n In such an event, you will need to reset Datahub. This can be done with the following steps:\n \n \n In all these examples, replace\n \n xxx\n \n with the slug (such as dev123).\n \n \n \n \n Turn Off Datahub\n \n \n Go to the environment you wish to reset, and disable Datahub. Save and sync the environment and wait until Datahub come offline by monitoring the Datahub pods:\n \n kubectl get pods -n dcw-xxx | grep datahub\n\n \n This will take awhile.\n \n \n \n \n Delete Metadata in PostgreSQL\n \n ./cli.py pod_sh\n./manage.py dbshell\n\\c xxx_dh\ndrop table metadata_aspect_v2\n\n \n \n \n Delete Persistent Volume Claims\n \n kubectl delete pvc -n dcw-xxx elasticsearch-master-elasticsearch-master-0\nkubectl delete pvc -n dcw-xxx data-xxx-kafka-broker-0\nkubectl delete pvc -n dcw-xxx data-xxx-kafka-zookeeper-0\n\n \n \n \n Verify Persistent Volumes are deleted\n \n kubectl get pv -n dcw-xxx | grep xxx | grep elasticsearch\nkubectl get pv -n dcw-xxx | grep xxx | grep kafka\n\n \n These should show no results. 
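A small sketch to keep checking until those PVs are actually gone (replace xxx with the slug, as elsewhere in this guide):

```bash
# Re-run the check every 10 seconds until nothing matches
while kubectl get pv | grep xxx | grep -E 'elasticsearch|kafka' > /dev/null; do
  echo "PVs still present, waiting..."
  sleep 10
done
echo "All Datahub PVs released"
```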
These should delete automatically when the PVC is deleted, make sure they are gone.\n \n \n \n \n Re-enable Datahub\n \n \n Go back to the environment, turn Datahub back on, and re-sync."},"50":{"url":"/docs_output/how-tos/security-vulnerabilities-fix.html","snip":"Edit on github\n \n \n \n \n \n \n How to run security vulnerabilities check and fix them\n \n \n \n \n React app\n \n \n \n \n Install\n \n yarn add yarn-audit-fix -D\n\n \n \n \n Run\n \n yarn-audit-fix\n\n \n Learn more: https://yarnpkg.com/package?name=yarn-audit-fix\n \n \n \n \n Django app\n \n \n \n \n Install\n \n pip install pip-audit\n\n \n \n \n Run\n \n pip-audit -r ./requirements.txt"},"51":{"url":"/docs_output/how-tos/set-maintenance-mode.html","snip":"Edit on github\n \n \n \n \n \n \n How to set the cluster in \"Maintenance Mode\"\n \n \n Turning it on:\n \n ./cli.py set_maintenance_mode <kubectl context> <cluster domain> \"on\" \"today at 9PM UTC\" \"support@datacoves.com\" \"our Support Team\"\n\n \n Turning it off:\n \n ./cli.py set_maintenance_mode <kubectl context> <cluster domain> \"off\""},"52":{"url":"/docs_output/how-tos/setup-oauth-on-azure.html","snip":"Edit on github\n \n \n \n \n \n \n How to set up oAuth authentication on Azure\n \n \n \n \n NOTE:\n \n This guide was based on this\n \n Auth0 help page\n \n , it could require some adjustments.\n \n \n \n This is done using Azure AD / Entra ID apps.\n \n \n \n \n Register new app\n \n \n \n Navigate to App registrations on Azure Portal\n \n \n \n \n \n \n \n Register a new App, choosing a name, selecting \"Accounts in this organizational directory only (Datacoves Inc. only - Single tenant)\" \nand providing a redirect url in the form of \"https://api.{cluster_domain}/complete/azuread-tenant-oauth2\"\n \n \n \n \n \n \n \n Once created, get the client id and tenant id from the overview page\n \n \n \n \n \n \n \n \n Generate Client Secret\n \n \n Navigate to 'Certificates & Secrets' and Generate a new client secret\n \n \n \n \n \n Keep the value safe.\n \n \n \n \n Configure permissions\n \n \n Navigate to app permissions and then 'Add permissions'. Select 'Microsoft Graph', then 'Delegated permissions', and the following OpenId permissions.\n \n \n \n \n \n Also add permissions to read groups memberships if they're going to be used to determine permissions in Datacoves.\n \n \n \n \n \n Finally, consent as an Admin the permissions granted by clicking on this button:\n \n \n \n \n \n \n \n Configure token\n \n \n We need to include the groups claim in both the ID and access token, to do so, go to Token configuration:\n \n \n \n \n \n Click on \"Add groups claim\", select \"Security groups\", make sure \"Group ID\" is selected in both ID and Access tokens and click on Add.\n \n \n \n \n \n \n \n Configure Datacoves\n \n \n Configure the Client ID, Tenant ID and Client Secret accordingly on Datacoves using the env variables AZUREAD_CLIENT_ID, AZUREAD_TENANT_ID, and AZUREAD_CLIENT_SECRET."},"53":{"url":"/docs_output/how-tos/setup-s3-for-dbt-api.html","snip":"Edit on github\n \n \n \n \n \n \n Create a S3 bucket for dbt api artifacts\n \n \n \n \n Create bucket on AWS console\n \n \n \n Create an S3 bucket.\n \n \n Choose a bucket name, we suggest using\n \n _dbt_api where\n \n could be\n \n ensemble\n \n ,\n \n ensembletest\n \n , etc.\n \n \n \n \n Create an IAM user with a policy to access the bucket, like the one below,\n replacing\n \n {your_bucket_name}\n \n with your bucket's name.\n \n \n Create an access key for the user. 
Share it with the Datacoves team.\n \n \n {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:PutObject\",\n \"s3:GetObject\",\n \"s3:GetObjectVersion\",\n \"s3:DeleteObject\",\n \"s3:DeleteObjectVersion\"\n ],\n \"Resource\": \"arn:aws:s3:::{your_bucket_name}/*\"\n },\n {\n \"Effect\": \"Allow\",\n \"Action\": [\n \"s3:ListBucket\",\n \"s3:GetBucketLocation\"\n ],\n \"Resource\": \"arn:aws:s3:::{your_bucket_name}\"\n }\n ]\n}\n\n \n \n \n Configure Datacoves accordingly\n \n \n For the cluster being configured, set the following environment variables in the\n \n core-dbt-api.env\n \n file:\n \n STORAGE_ADAPTER=s3\nS3_BUCKET_NAME=fill_in\nS3_ACCESS_KEY=fill_in\nS3_SECRET_ACCESS_KEY=fill_in\nS3_REGION=fill_in"},"54":{"url":"/docs_output/how-tos/testing-alerts.html","snip":"Edit on github\n \n \n \n \n \n \n How to create and test alerts\n \n \n \n \n Stack\n \n \n \n Alert Manager\n \n \n Loki Alert Ruler\n \n \n Grafana\n \n \n \n \n \n Test Loki Alert\n \n \n \n Add the new alert on\n \n scripts/data/loki-rules.yaml\n \n file.\n \n \n Install\n \n Observability Stack\n \n .\n \n \n Force some logs.\n \n \n \n Example:\n \n # Option 1\nkubectl -n core exec -it api-75567b8958-7b7rx -- bash\n\n# Option 2\n./cli.py pod_sh\n\n./manage.py shell_plus\n\n import requests\nimport time\n\npayload = {\n \"streams\": [\n {\n \"stream\": {\n \"agent_hostname\": \"eventhandler\",\n \"job\": \"test\",\n \"namespace\": \"core\"\n },\n \"values\": [[ str(int(time.time() * 1e9)), \"max node group size reached\" ]]\n }\n ]\n}\n\nrequests.post(\n url=\"http://loki-loki-distributed-gateway.prometheus.svc.cluster.local/loki/api/v1/push\",\n json=payload,\n headers={\"Content-Type\": \"application/json\"}\n)\n\n \n \n Now you can see the alert on\n \n Cluster Alerts"},"55":{"url":"/docs_output/how-tos/trigger-cloudx-pipeline-on-kenvue-cluster.html","snip":"Edit on github\n \n \n \n \n \n \n How to trigger a cloudx pipeline manually after changing cluster.yml on a kenvue cluster\n \n \n \n Go to the bastion\n \n \n Run the curl command you can find in 1Password named\n \n Run cloudx pipelines using curl on Kenvue clusters\n \n \n \n Not that the\n \n Branch\n \n queryparam references the repo branch you changed."},"56":{"url":"/docs_output/how-tos/update-kubernetes-and-datacoves.html","snip":"Edit on github\n \n \n \n \n \n \n Statement of Purpose\n \n \n The purpose of this document is to describe common upgrade procedures for both updating Kubernetes and updating Datacoves on customer clusters.\n \n \n \n \n Updating Kubernetes\n \n \n The procedure varies for Azure vs. AWS. We generally prefer to use the web console to do the upgrade.\n \n \n \n \n Gain Kubernetes command line access to the cluster\n \n \n Make sure you are set up for Kubernetes command line access.\n \n \n \n For Orrum the instructions are here: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/orrum\n \n \n \n Access whatever VPN is necessary. Switch to the correct Kubernetes context:\n \n kubectl config get-contexts\nkubectl config use-context context-name\n\n \n If you aren't set up to do this, stop now and get help.\n \n \n \n \n Disable Sentry Alarms\n \n \n Sentry is going to complain very loudly about all this.\n \n \n Currently, it looks like there is no way to disable this without the Sentry Business Plan which we do not have. 
But if that ever changes, we'll update this section.\n \n For now, there is nothing to do.\n \n \n \n \n \n Check and Prepare PDB's\n \n \n The Kubernetes PDBs can cause an upgrade to hang, as it will prevent a pod from shutting down to receive the update. Check the PDBs like this:\n \n kubectl get pdb -A\n\n \n You will get an output similar to:\n \n NAMESPACE NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE\ncalico-system calico-typha N/A 1 1 273d\ncore api 1 N/A 0 232d\ncore beat 1 N/A 0 232d\ncore redis 1 N/A 0 232d\ncore workbench 1 N/A 0 232d\ncore worker 1 N/A 0 232d\ndcw-dev123 dev123-airflow-scheduler-pdb N/A 1 1 26h\ndcw-dev123 dev123-airflow-webserver-pdb N/A 1 1 26h\nkube-system coredns-pdb 1 N/A 1 273d\nkube-system konnectivity-agent 1 N/A 1 273d\nkube-system metrics-server-pdb 1 N/A 1 273d\n\n \n Note the core namespace clusters with ALLOWED DISRUPTIONS at 0. You will need to patch those so that they will allow a disruption, and then revert the patch when done.\n \n \n The following commands will allow for a disruption:\n \n kubectl patch pdb -n core api -p '{\"spec\":{\"minAvailable\":0}}'\nkubectl patch pdb -n core beat -p '{\"spec\":{\"minAvailable\":0}}'\nkubectl patch pdb -n core redis -p '{\"spec\":{\"minAvailable\":0}}'\nkubectl patch pdb -n core workbench -p '{\"spec\":{\"minAvailable\":0}}'\nkubectl patch pdb -n core worker-long -p '{\"spec\":{\"minAvailable\":0}}'\nkubectl patch pdb -n core worker-main -p '{\"spec\":{\"minAvailable\":0}}'\nkubectl patch pdb -n core dbt-api -p '{\"spec\":{\"minAvailable\":0}}'\nkubectl patch pdb -n prometheus cortex-tenant -p '{\"spec\":{\"minAvailable\":0}}'\n\n \n You can apply this to any other PDBs that prevent disruptions.\n \n Take note of all the PDBs that you altered in this fashion.\n \n \n \n \n \n Upgrade Kubernetes\n \n \n This varies based on the cloud provider.\n \n \n \n \n On Azure\n \n \n Go to:\n \n \n https://portal.azure.com/#view/HubsExtension/BrowseResource/resourceType/Microsoft.ContainerService%2FmanagedClusters\n \n \n Make sure you are logged into the correct client account (check the upper right corner).\n \n \n Locate the cluster you want to work with. Often you will have to alter the default filters so that \"Subscription equals all\".\n \n \n Pick the cluster you are updating. If you are not sure which one, ask.\n \n \n On the overview screen that comes up by default, you will see \"Kubernetes version\" in the upper right area. Click the version number.\n \n \n It will show version details; click Upgrade Version.\n \n \n \n Pick Automatic upgrade: Enabled with patch (recommended)\n \n \n Pick Kubernetes version: the version you wish to upgrade to\n \n \n Pick upgrade scope: Upgrade control plane + all node pools\n \n \n Click save\n \n \n \n The upgrade will start in a few moments.\n \n \n \n \n Wait for it to come back\n \n \n The update can take quite awhile. Keep an eye on the pods and watch them update:\n \n kubectl get pods -A\n\n \n You will see a lot of activity, pods shutting down and restarting. 
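A couple of related sketches that can make the wait easier to follow (plain kubectl, nothing Datacoves-specific):

```bash
# Watch pods cycle in real time instead of re-running the command
kubectl get pods -A --watch

# Confirm every node has picked up the new kubelet version
kubectl get nodes -o wide
```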
Once it's all back online, you can restore the PDBs (see next step) and you can verify the update (see bottom of this file).\n \n \n \n \n Restore PDB's\n \n \n We need to put the PDB's back in place.\n \n kubectl get pdb -A\n\n \n You will get an output similar to:\n \n NAMESPACE NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE\ncalico-system calico-typha N/A 1 1 273d\ncore api 0 N/A 1 232d\ncore beat 0 N/A 1 232d\ncore redis 0 N/A 1 232d\ncore workbench 0 N/A 1 232d\ncore worker 0 N/A 1 232d\ndcw-dev123 dev123-airflow-scheduler-pdb N/A 1 1 26h\ndcw-dev123 dev123-airflow-webserver-pdb N/A 1 1 26h\nkube-system coredns-pdb 1 N/A 1 273d\nkube-system konnectivity-agent 1 N/A 1 273d\nkube-system metrics-server-pdb 1 N/A 1 273d\n\n \n The following commands will re-enable the PDBs:\n \n kubectl patch pdb -n core api -p '{\"spec\":{\"minAvailable\":1}}'\nkubectl patch pdb -n core beat -p '{\"spec\":{\"minAvailable\":1}}'\nkubectl patch pdb -n core redis -p '{\"spec\":{\"minAvailable\":1}}'\nkubectl patch pdb -n core workbench -p '{\"spec\":{\"minAvailable\":1}}'\nkubectl patch pdb -n core worker-main -p '{\"spec\":{\"minAvailable\":1}}'\nkubectl patch pdb -n core worker-long -p '{\"spec\":{\"minAvailable\":1}}'\nkubectl patch pdb -n core dbt-api -p '{\"spec\":{\"minAvailable\":1}}'\nkubectl patch pdb -n prometheus cortex-tenant -p '{\"spec\":{\"minAvailable\":1}}'\n\n \n Also restore any additional PDBs you had to disable in the prior step.\n \n \n \n \n Updating DataCoves\n \n \n Updating DataCoves is relatively simple. However, some of the access details can be compllicated.\n \n \n \n \n First Time Setup: Set Up Deployment Environment and Get Needed Access\n \n \n J&J, Kenvue, and Orrum have some complexity around access. AKS access is relatively easy. These are one-time steps you need to take to get access to each environment.\n \n \n \n \n AKS\n \n \n Accessing AKS is documented here: https://github.com/datacoves/datacoves/blob/main/docs/how-tos/administrate-east-us-a-aks-cluster.md\n \n \n Installation is done using your development system's checked out copy of the Datacoves repository. AKS' configuration repository is located at: https://github.com/datacoves/config-datacoves-east-us-a and should be checked out into your 'config' directory.\n \n \n \n \n Orrum\n \n \n Accessing Orrum is documented here: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/orrum\n \n \n Installation is done using your development system's checked out copy of the Datacoves repository. Note that Orrum requires a VPN, but the access is described above. Orrum's configuration repository is here: https://github.com/datacoves/config-datacoves-orrum and must be checked out into your 'config' directory.\n \n \n \n \n CCS\n \n \n To access CCS, your Datacoves account must be added to CCS' Azure organization. Eugine Kim can assist with this.\n \n \n Then, you must download and install the Azure VPN client. For Macs, this is done through the Apple Store.\n \n \n And finally, you need the Azure command line tools which you probably already have installed if you followed our README instructions for setting up this repository. 
You should also be logged into Azure with\n \n az login\n \n .\n \n \n Then, on the VPN, you can shell into the Bastion as follows:\n \n az ssh vm --subscription 3099b8af-7ca1-4ff4-b9c5-1960d75beac7 ssh vm --ip 10.0.2.4\n\n \n Once on the Bastion, the tools are installed with Linux Brew: So, edit your\n \n .bashrc\n \n file in your home directory with your favorite editor and add this to the end:\n \n eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv)\n\n \n Log out and log back in.\n \n python3 --version\n \n should reveal a modern\n \n 3.1x\n \n python version.\n \n \n From this point, it is simply check out the datacoves repository and do the installation like any other system.\n \n \n \n \n J&J / Kenvue\n \n \n J&J access is complex; going into the details of all the setup is out of the scope of this documentation. However, we will cover how to get set up on the bastion so you can get to work.\n \n \n It is a good idea to read this documentation if you haven't already: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/jnj\n \n \n In order to do deployments in J&J or Kenvue, you have to do the work from a bastion server, which is a Linux machine accessible via your Cloud PC. J&J and Kenvue have different bastions, however configuring them is basically the same.\n \n \n The IP address for the J&J Bastion is:\n \n 10.157.82.138\n \n and the IP address for the Kenvue bastion is: (... I am unable to log into Kenvue right now! Great!)\n \n \n I make a\n \n .bat\n \n file that runs\n \n ssh IP\n \n where the IP is the one above.\n \n \n Once you log into the bastion, there's a few things to note:\n \n \n \n You can sudo to root thusly:\n \n sudo su -\n \n . Any other\n \n sudo\n \n command will not work, you can only\n \n sudo su -\n \n .\n \n \n The default home directory you log into on the bastion does not have much disk space, so we use a volume mount on\n \n /app\n \n for most of our work.\n \n \n We use\n \n brew\n \n to manage packages.\n \n \n \n To get set up initially, take the following steps:\n \n \n \n \n Copy base configuration\n \n \n \n cp -R /app/users/datacoves-home-template/. ~/\n \n \n \n \n \n Add brew to your bash rc\n \n \n Edit your\n \n .bashrc\n \n file in your home directory with your favorite editor and add this to the end:\n \n eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv)\n\n \n Log out and log back in.\n \n python3 --version\n \n should reveal a modern\n \n 3.1x\n \n python version.\n \n \n \n \n Login to Kubernetes\n \n kubectl config get-contexts\n\n \n \n \n Set up your deployment repository\n \n sudo su -\nmkdir -p /app/users/$USER\nchown -R $USER /app/users/$USER\nexit\ncd /app/users/$USER\ngit clone https://github.com/datacoves/datacoves.git\ncd datacoves\npython3 -m venv .venv\nsource .venv/bin/activate\npip3 install -r requirements.txt\n\n \n \n \n Set up your configuration repository\n \n \n For each environment you will deploy to, you need to check out its config repository into your 'configs' directory. The list of repositories is here:\n \n \n https://github.com/datacoves/datacoves/blob/main/docs/client-docs/jnj/1-cluster-requirements.md\n \n \n \n \n Before Deployment: Create your Plan\n \n \n Before a deployment is done, you must first check to see if there's any special installation steps. I use a Word document template, and I update it according to each release adding any special steps that I need to. Then I print it out and use it as a physical check list. 
## Before Deployment: Create your Plan

Before a deployment is done, you must first check whether there are any special installation steps. I use a Word document template and update it for each release, adding any special steps that I need to. Then I print it out and use it as a physical checklist. My template file is here.

First, look at the version of the cluster you will be updating. You can get this version from `cluster-params.yaml`. The easiest way to see what changed is to compare the two versions on GitHub. Here's an example of a comparison between two versions:

https://github.com/datacoves/datacoves/compare/v3.2.202410250048...v3.2.202411140044

Look at all the pull requests in your new release and check whether any of them are labeled "special release step"; add any special steps to your release document. Post your finished work on the Slack dev channel for commentary.

## Perform the installation

Release documentation is here: https://www.notion.so/datacoves/Release-Instructions-1b5ea827f87280f98620dccc1600727c

**Be very sure you are releasing from the correct release branch.** You need to release from the tag you are releasing. You can check out a tag like this:

```
git fetch --all --tags
git checkout refs/tags/v1.2.34234523452524
```

Replace the tag name with the version you are deploying. If you deploy from main or the wrong branch, you risk using installation scripts that are newer and have features that aren't supported yet by the images you are deploying.

## How to run migrations on a stuck install process

Sometimes migrations do not run automatically because the new pod containing the migrations fails before they can be applied. When this occurs we need to execute them manually. Remove the `livenessProbe` and `readinessProbe`; this lets the new pod run correctly and allows us to enter it and execute the migrations ourselves.

```
kubectl patch deployments -n core api -p '{"spec": {"template": {"spec": {"containers":[{"name": "api", "livenessProbe": null, "readinessProbe": null}]}}}}'
```

Once the pod is running correctly:

```
kubectl -n core get pods
kubectl -n core exec -it api-<hash> -- bash
./manage.py migrate
```
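After the migrations run, you can sanity-check that nothing is left pending before moving on (a sketch; this assumes the standard Django management commands are available inside the api pod):

```
# Still inside the api pod: unapplied migrations show up as "[ ]"
./manage.py showmigrations | grep "\[ \]" || echo "All migrations applied"
```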
## Create Profile Image Set for New Release

This may be necessary if an error about Profile Image Sets occurs. It is a bit of a chicken-and-egg problem: the release needs to exist prior to creating the Profile Image Set, but the release won't exist until the install process is attempted.

Log into the customer's API panel:

- Orrum's is: https://api.datacoves.orrum.com/panel
- CCS' is: https://api.datacoves.cssperfusion.com/panel

Under "Projects" pick "Profile Image Sets". Go to the existing Profile Image Set for the old release, and copy/paste the 4 JSON blocks into an editor. Take note of what is in the 'profile' field.

Go back to the listing of Profile Image Sets and click **+ Add profile image set** in the corner. Make the profile the same as the previous release's, and choose the new release from the release select box.

Then paste the four JSON blocks into the new Profile Image Set. Check your release YAML file in `releases` and note the 'code_server_libraries' section; compare it to the Python libraries in the Profile Image Set. Update versions as needed, but never downgrade. There's no need to add libraries that are in the release YAML but not in the Profile Image Set entry.

Also check the release's 'code_server_extensions' against the code server extensions in the Profile Image Set, applying the same logic to update them.

Save the new Profile Image Set. Then, keeping all the data from the old Profile Image Set just in case you need it, go back into the old one and delete it.

You can now re-run the installation and it should get past this error.

## Verify Installation

Verifying the installation is the same no matter what process you're engaging in with DataCoves clusters, be it a Kubernetes update or a DataCoves update.

- Make sure no helm chart failed and retry if needed: `./cli.py retry_helm_charts`
- Log into the customer's API panel and make sure it is working.
- Log into the customer's launchpad and make sure it is working.
- Pick one of the customer's environments and make sure you can get into it.
  - Try to use code server ("Transform")
  - Open a terminal in code server and run `dbt-coves --version`
  - Try to use Airflow ("Orchestrate")
  - Look at logs in one of the DAGs

If your user does not have permission to get into the customer's cluster, temporarily add yourself to the necessary groups to check the cluster.

---
`/docs_output/how-tos/update-ssl-certificates.html`

## Statement of Purpose

The purpose of this document is to describe the process of upgrading SSL certificates for customers that are using custom certificates (i.e. not using Let's Encrypt).

## Step 1: Prepare and Verify Certificate Files

This should be done soon after the certificate files are received, not at the last minute.

Ultimately, we need the following files:

- root.cer
- root.secret.key
- wildcard.cer
- wildcard.secret.key

root.cer is the certificate for the root domain, e.g. datacoves.orrum.com; wildcard.cer is a wildcard, e.g. *.datacoves.orrum.com.

All of these files should be in PEM format, and the .cer files should contain the complete certificate chain. PEM format looks like this:

```
-----BEGIN CERTIFICATE-----
MIIEjTCCAvWgAwIBAgIQQ71EG0d4110tqpc8I8ur/jANBgkqhkiG9w0BAQsFADCB
pzEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMT4wPAYDVQQLDDVzc2Fz
c2lAU2ViYXN0aWFucy1NYWNCb29rLVByby5sb2NhbCAoU2ViYXN0aWFuIFNhc3Np
....
JbszQlyzkyzBxQ5eiK3OUNdsB+n5Zo+TshRRL45wA9fZmvAizzmtehxJWUbidGL7
eqqMWqdt11MTLJ3feOjGlryMFO6TIt/aH/91VkoLyVhsemuk5LukZ1nIxoWvzHcf
y2cC+I3F8bWbYkRr92fmb8A=
-----END CERTIFICATE-----
```

There should be several BEGIN / END certificate blocks in the wildcard.cer and root.cer files; they should contain a complete certificate chain and should be treated as suspect if they only contain a single certificate block.

The key files will have a slightly different header, looking like this:

```
-----BEGIN PRIVATE KEY-----
MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCLf9Q17CQlOWDB
CwWOuzL4+aalFwj2PR+OTuPnjHCI8stDedvmy5jtxSkdAL+5PgNu7ZJbKFhbODgT
...
OpuSfWnGVhOmii2aiYePtvNqDsLQv59MUxpUi8R6aw/XhG2Vb7t14+hbmUtRScUV
LcGdNBdJyB8NaHYR/sNF1w==
-----END PRIVATE KEY-----
```

If you receive a pfx format file, we cover that in a section below. Read that section and go through those steps, then return to this section to complete verification.
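Before running the full verification below, a quick way to confirm each .cer file actually contains a chain rather than a single certificate (a sketch using standard tooling):

```
# Each of these should report more than one certificate block
grep -c "BEGIN CERTIFICATE" root.cer
grep -c "BEGIN CERTIFICATE" wildcard.cer
```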
You can verify the certs with the following commands:

```
# Verify root
openssl crl2pkcs7 -nocrl -certfile root.cer | openssl pkcs7 -print_certs -noout -text

# Verify wildcard
openssl crl2pkcs7 -nocrl -certfile wildcard.cer | openssl pkcs7 -print_certs -noout -text
```

You will see several blocks with a Certificate header. One block should contain the host name for the certificate; in our example, datacoves.orrum.com:

```
Certificate:
    Data:
        Version: 3 (0x2)
        Serial Number:
            01:cb:00:21:05:34:94:76:2b:f8:68:cf:8a:09:4c:02
        Signature Algorithm: sha256WithRSAEncryption
        Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1
        Validity
            Not Before: Apr 22 00:00:00 2024 GMT
            Not After : Apr 21 23:59:59 2025 GMT
        Subject: CN=datacoves.orrum.com
```

Note the hostname under 'Subject'; make sure it is the correct host. root will appear as above, with a single host name; wildcard should look like this instead:

```
Certificate:
    Data:
        Version: 3 (0x2)
        Serial Number:
            0d:7f:e3:36:2c:db:b0:65:78:9a:c1:88:f8:06:12:4f
        Signature Algorithm: sha256WithRSAEncryption
        Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1
        Validity
            Not Before: Apr 22 00:00:00 2024 GMT
            Not After : Apr 21 23:59:59 2025 GMT
        Subject: CN=*.datacoves.orrum.com
```

Note the * symbol in the subject. Also take note of the issuer: `CN=Thawte TLS RSA CA G1`.

Elsewhere in the certificate output, you should see a certificate for the issuer, such as:

```
Certificate:
    Data:
        Version: 3 (0x2)
        Serial Number:
            09:0e:e8:c5:de:5b:fa:62:d2:ae:2f:f7:09:7c:48:57
        Signature Algorithm: sha256WithRSAEncryption
        Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2
        Validity
            Not Before: Nov  2 12:24:25 2017 GMT
            Not After : Nov  2 12:24:25 2027 GMT
        Subject: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1
```

Note that the subject matches the issuer name above. Finally, this certificate has an issuer as well; make sure that one is in the file too. In this case it is `DigiCert Global Root G2`, and in our example you can find it here:

```
Certificate:
    Data:
        Version: 3 (0x2)
        Serial Number:
            03:3a:f1:e6:a7:11:a9:a0:bb:28:64:b1:1d:09:fa:e5
        Signature Algorithm: sha256WithRSAEncryption
        Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2
        Validity
            Not Before: Aug  1 12:00:00 2013 GMT
            Not After : Jan 15 12:00:00 2038 GMT
        Subject: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2
```

Note again the 'Subject' line.
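If reading the full `-text` output is tedious, you can list just the subject and issuer of each certificate in the bundle and check that each issuer is the subject of the next certificate up the chain:

```
# Prints a "subject=" / "issuer=" pair for every certificate in the file
openssl crl2pkcs7 -nocrl -certfile wildcard.cer | openssl pkcs7 -print_certs -noout
```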
Typically PEM files will have certificates in the following order:

1. Host's certificate
2. One or more intermediates
3. Root certificate

If you have to assemble a certificate from multiple parts, please be aware that this is the recommended ordering; however, I don't think it will cause an error if you get the ordering wrong.

Once your certificates are in order, you can verify the keys with the following commands:

```
openssl rsa -check -noout -in wildcard.secret.key
openssl rsa -check -noout -in root.secret.key
```

Both should report that the RSA key is OK.

Now compare the modulus of the key and the cert:

```
# These two should match
openssl rsa -modulus -noout -in wildcard.secret.key | openssl md5
openssl x509 -modulus -noout -in wildcard.cer | openssl md5

# And these two should match
openssl rsa -modulus -noout -in root.secret.key | openssl md5
openssl x509 -modulus -noout -in root.cer | openssl md5
```

If the moduli don't match, it may be because the server certificate isn't the first certificate in the .cer file. Make sure the order is correct and try again.

## Converting pfx format files

We have received files in pfx format instead of PEM, and these require special handling. Follow these directions to convert them to usable .cer and .key files, starting with the following commands:

```
# Assuming we have files wildcard.pfx and root.pfx
#
# Note: The --legacy option seems to be needed for most people, however
# some are able to do this without --legacy ... you can try without
# it first if you want.
#
# You will be asked for an "Import Password" -- just hit enter to skip that
# If you get an error after the Import Password, you need --legacy

openssl pkcs12 -in wildcard.pfx -cacerts -out wildcard_ca.cer -nodes -nokeys --legacy
openssl pkcs12 -in root.pfx -cacerts -out root_ca.cer -nodes -nokeys --legacy
```

Edit the resulting wildcard_ca.cer and root_ca.cer files and remove the header above `-----BEGIN CERTIFICATE-----`. This header will resemble:

```
Bag Attributes: <No Attributes>
subject=C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1
issuer=C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2
```

WARNING: Check the ENTIRE file, as there will probably be multiple headers. Any text not between `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` must be removed!

Next, you need to extract the server certs:

```
# See notes above regarding --legacy and "Import Password"

openssl pkcs12 -in wildcard.pfx -clcerts -nokeys -out wildcard.single.cer --legacy
openssl pkcs12 -in root.pfx -clcerts -nokeys -out root.single.cer --legacy
```

Once again, delete the header(s) above `-----BEGIN CERTIFICATE-----` in these files. Afterwards, run the following commands:

```
cat wildcard.single.cer wildcard_ca.cer > wildcard.cer
cat root.single.cer root_ca.cer > root.cer
```

Now we're going to generate the private keys.
When generating the private keys, set a temporary password (just the word `password` is fine); we will remove the password in the subsequent step.

```
# See notes above regarding --legacy and "Import Password"
openssl pkcs12 -in wildcard.pfx -nocerts -out wildcard.secrets.withpass.key --legacy
openssl pkcs12 -in root.pfx -nocerts -out root.secrets.withpass.key --legacy
```

And finally, strip the passwords out for the final key files:

```
openssl rsa -in wildcard.secrets.withpass.key -out wildcard.secret.key
openssl rsa -in root.secrets.withpass.key -out root.secret.key
```

Now you have the files in PEM format, and you can go back to the section above to verify them.

## Step 2: Update Cluster

This step may vary from customer to customer, so see the appropriate subsection.

### Orrum

First, make sure you have the configuration repository checked out. In your `config` directory, clone it like this:

```
git clone https://github.com/datacoves/config-datacoves-orrum.git datacoves.orrum.com
```

In the `datacoves.orrum.com` directory, reveal the secrets. Run this from the top of that directory; if you call it from a subdirectory, you'll get an error saying `core-api.env.secret` cannot be found.

```
git secret reveal -f
```

TODO: add instructions for setting up git secret

Then, in the `base` directory you will find `root.cer`, `root.secret.key`, `wildcard.cer`, and `wildcard.secret.key`. Replace these files with the new, verified files from Step 1.

Connect to the Orrum VPN. Instructions are here: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/orrum

Make sure you are in your Orrum context, whatever that is named:

```
# Use:
#   kubectl config get-contexts
# to get the context list if needed.
kubectl config use-context orrum_new
```

Then run setup base. Return to the root directory of your git checkout to run `cli.py`:

```
# Activate your venv first if necessary
./cli.py setup_base
```

After the cluster is updated (the ingress will be updated), check the certificate:

```
curl https://api.datacoves.orrum.com -vI
```

This should output a bunch of information about the certificate, including:

```
* Server certificate:
*  subject: CN=*.datacoves.orrum.com
*  start date: Apr  8 07:33:48 2024 GMT
*  expire date: Jul  1 07:33:47 2024 GMT
*  subjectAltName: host "api.datacoves.orrum.com" matched cert's "*.datacoves.orrum.com"
*  issuer: C=US; O=DigiCert Inc; OU=www.digicert.com; CN=Thawte TLS RSA CA G1
*  SSL certificate verify ok.
```

(The CN should be the correct host, and the expire date should be correct.)

Check the non-wildcard version as well:

```
curl https://datacoves.orrum.com -vI
```

Log into Orrum's launchpad and go into one of the environments to make sure pomerium doesn't have any issues; pomerium is particularly sensitive to certificate problems such as not having the full certificate chain in the root.cer / wildcard.cer files.
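To double-check that the full chain is actually being served (the usual cause of such complaints), you can inspect the chain the ingress presents; a quick sketch:

```
# Lists the subject/issuer of every certificate the server presents;
# you should see the host certificate plus the intermediates
echo | openssl s_client -connect api.datacoves.orrum.com:443 \
  -servername api.datacoves.orrum.com -showcerts 2>/dev/null | grep -E ' s:| i:'
```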
If everything works alright, let's push the secrets. Be careful not to push up the key files; they will show up as "Untracked files" in `git status`. It is recommended that you add the changed files manually:

```
# Go back to the config directory
cd config/datacoves.orrum.com

# See what files changed
git status

# Add only the changed files, do NOT add the .key files or the original .pfx
git add .gitsecret/paths/mapping.cfg base/root.cer base/wildcard.cer secrets/core-api.env.secret secrets/docker-config.secret.json.secret secrets/rabbitmq.env.secret

# You can also add any other safe file that you modified, just not those keys!

git commit -m "Update certificates"
git push
```

And it should be done!

---
`/docs_output/how-tos/upgrade-dbt-or-related-tools.html`

# How to upgrade dbt or related tools

## dbt-coves

- Open a pull request on dbt-coves and merge it. This will deploy a new PyPI version.

## All libraries

- Get the current versions of the new libraries.
- Upgrade the code-server (src/code-server/code-server) docker image requirements.txt and labels.
- Upgrade the CI image libraries (ci/airflow and ci/basic) and update labels.
- Upgrade the Airflow image libraries, install the new libraries in the environment targeted for DAG runs, and update labels accordingly.
- Run the script that updates labels on the docker files.

---
`/docs_output/how-tos/work-on-a-pre-release-locally.html`

# Make and work on a pre-release locally

Sometimes you need to change images and test them locally without affecting production releases.

To do so:

1. Build the image you just changed:

   ```
   ./cli.py build_and_push <path to service>  # i.e. src/core/api
   ```

   You'll need to specify the issue number. This command will build and push a new image, prefixing its name with the ticket number you provided.

2. Generate the pre-release. Once the image is pushed, you can create a new pre-release to try that image:

   ```
   ./cli.py generate_release
   ```

   This will create a new release file under /releases; it will also be pushed to GitHub releases so other devs can reuse it.

3. Set the pre-release on the datacoveslocal.com cluster:

   ```
   ./cli.py set_release
   ```

   Select `datacoveslocal.com`. You might need to undo the file changes before pushing to your PR branch.

4. Upgrade datacoves in the local cluster:

   ```
   ./cli.py install
   ```

   Select `datacoveslocal.com`.

---
`/docs_output/implementation/operator.html`

# Operator documentation

## Overview

The datacoves **operator** is a kubernetes **controller**, written in Go, scaffolded using **kubebuilder**. It is responsible for setting up and managing the kubernetes resources that make up a **workspace** (a.k.a. an **environment**). Each workspace has its own k8s namespace. The operator's source code is in `src/core/operator/`.

The operator watches a few custom resources that specify what to set up. They are defined in `api/v1/`.
- **Workspace**: The main resource, fully describing a workspace. Parts of the configuration are held in other resources, but the workspace references them all and is the root of the configuration. Whenever a change to a model in the core api database impacts a workspace configuration, the core-api's workspace.sync task recomputes and (re-)writes the corresponding Workspace k8s resource. The operator detects the resource update and runs the reconciliation process to apply any required changes to the kubernetes resources that compose the workspace.
- **User**: Each workspace has a set of users, and each user gets certain resources, such as a code-server deployment.
- **HelmRelease**: Most services set up by the operator are installed using helm. A HelmRelease specifies that a helm chart should be installed, using a certain version and helm values.

## Background

Some useful background knowledge to have and resources to review:

### Go

- The go spec is short, readable and precise. Use it.
- Effective Go and the Go FAQ.
- Understanding Go's concurrency constructs: CSP, goroutines and channels.
- Understanding that Go (like C) is pass by value, so the distinction between struct types and pointers to structs is often important.
- Understanding that errors are values in Go.
- Understanding the `context` package.
- How controller-runtime does logging.

### Kubernetes

- API concepts
- API conventions
- The kubebuilder book
- Understand resourceVersion and generation.
- Understand ownerReferences and finalizers.

## Implementation: Reconcilers

### Change detection and reconciliation

The entry points to our code are the `Reconcile` methods for each resource, in `controllers/*_controller.go`. The framework watches kubernetes resources to determine when to call `Reconcile`. The `SetupWithManager` method can be used to influence when `Reconcile` should be called.

Reconciliation must be idempotent. If an error is returned, or there's a panic, the framework will retry calling `Reconcile` repeatedly, less frequently each time.

To simplify change detection and ensure deployments are restarted when a secret or configmap that affects them changes, we treat secrets and configmaps as immutable values. We include a hash of their contents in their names. This means that to start using a new version, references to it must be updated. This implies that resources using them will change too, which means all changes can be detected by watching the resource that holds the reference, without checking the contents of the secret or configmap.

### Applying changes to derived resources

Reconciliation is conceptually stateless. We compute a set of derived resources from the current value of the Workspace resource. We would like to have a primitive that is the equivalent of `kubectl apply` in our Go code. Unfortunately that mechanism was not available for reuse when the operator was written, so we had to build our own resource diffing. These are the `reconcile*` functions in `controllers/reconcilers.go`.
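As a practical aside, you can observe what the operator manages in a running cluster by listing its custom resources with kubectl; a sketch (the resource plural names and the namespace are assumptions, so check `kubectl api-resources` on your cluster first):

```
# Discover the custom resource kinds the operator defines
kubectl api-resources | grep -iE 'workspace|helmrelease'

# Inspect the desired state for one environment namespace (namespace name is an example)
kubectl get workspaces,helmreleases -n dcw-dev123
```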
### Concurrency

The framework runs `Reconcile` concurrently for different resource types. It also runs the reconciliation of different resources concurrently, at most `MaxConcurrentReconciles` at once. Reconciliation of multiple changes to a single resource happens serially.

We take advantage of this fact to isolate failures. The Workspace reconciler applies changes to HelmRelease and User resources. This way, a failure reconciling a HelmRelease or a User won't make the whole Workspace reconciliation fail.

## Implementation: Helm runner

Before having the `helm` module carry out the installation of helm charts by starting helm subprocesses, we used to call into helm's Go code directly from the helmrelease controller. This caused two problems:

- When the operator was restarted, the helm release (stored by helm in a k8s secret) could be left in a pending-upgrade state, which should only happen while helm is still running. This is due to helm not cleaning up when interrupted.
- We ran out of memory, most likely due to a memory leak involving helm state.

To address these issues we implemented the `helm` module, which schedules helm subprocesses so that we can control their execution. It is a separate module that runs a singleton scheduler process and receives requests to run helm over a channel. The helmrelease controller simply sends requests to this process without waiting for or checking results.

Currently, helm install failures are logged but not retried; manual intervention is required in this case. In any case, retrying the whole helm install is unlikely to succeed if nothing changed. Certain kinds of intermittent failures could be detected and retried within an operation if desired, but in this case, not retrying the helmrelease reconciliation as a whole is best, I think.

The meat of the implementation is in the `run` function. It keeps track of running and pending operations (and their potential memory usage) and spawns new goroutines for each install/upgrade/uninstall operation. It is somewhat subtle code; you should understand goroutines and channels well before touching it.

When the operator is signaled by kubernetes to exit, we must be as gentle as possible with the helm subprocesses to avoid leaving releases in a bad state. There's a grace period between the first signal that the program will exit and forceful termination. We use it to send SIGTERM to all the helm subprocesses, which should allow them to exit more cleanly than if they were SIGKILLed. We haven't seen any more charts left in pending-upgrade after this change.
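When manual intervention is needed, or you suspect an interrupted upgrade, checking for stuck releases from the command line is a reasonable first step; a sketch:

```
# Releases stuck in a pending state (pending-install / pending-upgrade / pending-rollback)
helm list -A --pending

# Review the history of a suspect release before retrying it
helm history <release> -n <namespace>
```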
---
`/docs_output/`

## config

This directory holds configuration, organized by the cluster's domain name.

Most subdirectories are git submodules, to keep their configuration separate.

Every cluster configuration directory can have the following subdirectories:

- base: Kustomize directory for the kubernetes cluster's global components and configuration.
- kind: Configuration to create a kind cluster.
- eks: Configuration to create an eks cluster.
- cluster-params.yaml, cluster-params.secret.yaml: Cluster configuration.
- secrets: Secrets that are not specific to an environment.
- environments: Environment configurations, with one subdirectory per environment.

The datacoveslocal.com cluster, for example, looks like this:

```
config/
├── datacoveslocal.com/
│   ├── base/
│   ├── environments/
│   ├── kind/
│   ├── secrets/
│   ├── cluster-params.secret.yaml
│   ├── cluster-params.secret.yaml.secret
│   └── cluster-params.yaml
...
```

## docs

Documentation.

```
docs
├── client-docs           For clients.
│   ├── jnj
│   └── ...
├── how-tos               For devops. How to do certain things.
│   ├── do-thing-x
│   └── ...
├── dev-logs              Developer logs. Record something you did for future reference. Be careful not to include secrets.
│   ├── 2021-09-eks-setup.md
│   └── ...
├── issues-resolutions    For support team. How to solve common user issues.
│
└── ...
```

## scripts

Python scripts to manage the project, usually called by ./cli.py commands.

## src

Datacoves source code and docker image definitions. The core components are in `src/core`.

---
`/docs_output/issues-resolutions/airflow-corrupted-dag-logs.html`

# DAG logs were serialized with a newer version of pickle than the one installed on the Airflow webserver

## Logs

```
Traceback (most recent call last):
  File "/home/airflow/.local/bin/airflow", line 8, in <module>
    sys.exit(main())
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/__main__.py", line 38, in main
    args.func(args)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 51, in command
    return func(*args, **kwargs)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/cli.py", line 99, in wrapper
    return f(*args, **kwargs)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/commands/scheduler_command.py", line 75, in scheduler
    _run_scheduler_job(args=args)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/commands/scheduler_command.py", line 46, in _run_scheduler_job
    job.run()
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/base_job.py", line 244, in run
    self._execute()
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 739, in _execute
    self._run_scheduler_loop()
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 827, in _run_scheduler_loop
    num_queued_tis = self._do_scheduling(session)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 909, in _do_scheduling
    callback_to_run = self._schedule_dag_run(dag_run, session)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1151, in _schedule_dag_run
    schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
    return func(*args, **kwargs)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 522, in update_state
    info = self.task_instance_scheduling_decisions(session)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
    return func(*args, **kwargs)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 640, in task_instance_scheduling_decisions
    tis = list(self.get_task_instances(session=session, state=State.task_states))
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
    return func(*args, **kwargs)
  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 441, in get_task_instances
    return tis.all()
  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 2683, in all
    return self._iter().all()
  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 1335, in all
    return self._allrows()
  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 408, in _allrows
    rows = self._fetchall_impl()
  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 1243, in _fetchall_impl
    return self._real_result._fetchall_impl()
  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 1636, in _fetchall_impl
    return list(self.iterator)
  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/loading.py", line 120, in chunks
    fetch = cursor._raw_all_rows()
  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 400, in _raw_all_rows
    return [make_row(row) for row in rows]
  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 400, in <listcomp>
    return [make_row(row) for row in rows]
  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/sql/sqltypes.py", line 1816, in process
    return loads(value)
  File "/home/airflow/.local/lib/python3.7/site-packages/dill/_dill.py", line 275, in loads
    return load(file, ignore, **kwds)
  File "/home/airflow/.local/lib/python3.7/site-packages/dill/_dill.py", line 270, in load
    return Unpickler(file, ignore=ignore, **kwds).load()
  File "/home/airflow/.local/lib/python3.7/site-packages/dill/_dill.py", line 472, in load
    obj = StockUnpickler.load(self)
ValueError: unsupported pickle protocol: 5
```

## Solution

Connect to the scheduler or triggerer pod and then remove the DAG by running:

```
airflow dags delete <dag id>
```
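For reference, doing this from outside the cluster might look like the following (a sketch; the namespace and pod names follow the `dcw-<env>` / `<env>-airflow-scheduler` patterns seen elsewhere in these docs, so adjust to your environment):

```
kubectl -n dcw-dev123 get pods | grep airflow-scheduler
kubectl -n dcw-dev123 exec -it <scheduler-pod-name> -- airflow dags delete <dag id>
```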
---
`/docs_output/issues-resolutions/dbt-core-debugging.html`

# Python dbt-core debugging

## Context: dbt does not respond to any of its commands

Due to changes in environment variable handling on the dbt-core side, a read-only `$DBT_PROJECT_DIR` led to dbt not responding to anything but the `--version` call.

All dbt commands returned exit code 2:

```
2   The dbt invocation completed with an unhandled error (eg. ctrl-c, network interruption, etc).
```

## Solution

Using the dbt-core Python library and its `dbtRunner` gives us the possibility to surface that "unhandled error":

```
>>> from dbt.cli.main import dbtRunner
>>> dbt_cli = dbtRunner()
>>> dbt_cli.invoke(["ls"])
dbtRunnerResult(success=False, exception=OSError(30, 'Read-only file system'), result=None)
```

---
`/docs_output/issues-resolutions/docker-image-debugging.html`

# Debugging images outside Datacoves

Sometimes we need to review the images that run in Datacoves in a simpler way, to debug processes, review library versions, pipeline versions, etc.

Create a `compose.yaml` or `docker-compose.yaml` file:

```
version: '3'

services:
  snowflake:
    image: "taqy-docker.artifactrepo.jnj.com/datacoves/ci-basic-dbt-snowflake:3.1"
    command: bash -c "sleep infinity"
```

Run commands:

```
docker compose run --rm snowflake bash -c "pip show dbt-core dbt-snowflake"
```

Get a terminal:

```
docker compose up -d
docker ps
docker exec -ti <container-id> /bin/bash
```

---
`/docs_output/issues-resolutions/docker-push-stopped-working.html`

# Reset docker config authentication

After a password reset, you might need to log out and log in again:

```
docker logout
```

Then, remove the entry for taqy-docker.artifactrepo.jnj.com in `~/.docker/config.json`.

Finally, log in again:

```
docker login taqy-docker.artifactrepo.jnj.com
```

## Unlock your artifactory account

Sometimes your account can get blocked and you need to unlock it.

- Go to appdevtools.
- Under Support, User Access, click on "Unlock Artifactory Account".

---
`/docs_output/issues-resolutions/helm-chart.html`

# Helm Chart Resolutions

## How to patch releases?

Sometimes we want to change a value in a Helm Chart, but doing so normally means editing some component such as an adapter or the Operator and generating a new release. This procedure is very useful for skipping that whole process and running our tests more quickly.

### Option 1

1. Get the values from the release:

   ```
   # helm get values <release> -n <namespace>
   helm get values dev123-datahub -n dcw-dev123 > values.yaml
   ```

2. Edit/add the values in the file:

   ```
   vi values.yaml
   ```

3. Add the repository if it does not exist:

   ```
   # helm repo add <name> <url>
   helm repo add datahub https://helm.datahubproject.io/
   ```

4. Patch the helm chart:

   ```
   # helm upgrade --version <x.x.x> -f values.yaml <release> <repository> -n <namespace>
   helm upgrade --version 0.4.16 -f values.yaml dev123-datahub datahub/datahub -n dcw-dev123
   ```

### Option 2

1. Patch the helm chart:

   ```
   # helm upgrade <release> <chart> -n <namespace> --set key1=value1,key2=value2
   helm upgrade dev123-datahub datahub/datahub -n dcw-dev123 --set key1=value1,key2=value2
   ```

More info

---
`/docs_output/issues-resolutions/pomerium-not-allowing-access.html`

# Pomerium does not allow access to environments

## Problem

The Launchpad works OK, but pomerium returns a timeout, and logs like these are found:

```
{"level":"info","X-Forwarded-For":["10.255.255.2,10.10.0.8"],"X-Forwarded-Host":["authenticate-dev123.orrum.datacoves.com"],"X-Forwarded-Port":["443"],"X-Forwarded-Proto":["http"],"X-Real-Ip":["10.255.255.2"],"ip":"127.0.0.1","user_agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36","request-id":"834a4284-9d39-474a-abb5-cd7203755386","error":"Bad Request: internal/sessions: session is not found","time":"2023-08-17T13:13:39Z","message":"authenticate: session load error"}
{"level":"info","service":"envoy","upstream-cluster":"pomerium-control-plane-http","method":"GET","authority":"authenticate-dev123.orrum.datacoves.com","path":"/.pomerium","user-agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36","referer":"","forwarded-for":"10.255.255.2,10.10.0.8","request-id":"834a4284-9d39-474a-abb5-cd7203755386","duration":15000.251354,"size":24,"response-code":504,"response-code-details":"upstream_response_timeout","time":"2023-08-17T13:13:55Z","message":"http-request"}
```

## Cause

This is a DNS resolution issue within pomerium. It typically happens when the cluster model has wrong values for `internal_ip` or `external_ip`. This could have happened because the DB was copied to a different cluster, or because the cluster changed its IPs.

## Solution

Remove the values in those two fields and save the cluster model again. On save, it will regenerate those IPs and Pomerium will be reinstalled.
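After saving the cluster model, it can take a little while for Pomerium to be reinstalled; a hedged way to confirm it came back is to watch the pods and logs in the affected environment (the namespace and pod names here are examples):

```
# Watch the pomerium pods restart in the environment namespace
kubectl -n dcw-dev123 get pods | grep pomerium

# Tail the new pod's logs and confirm the session/timeout errors are gone
kubectl -n dcw-dev123 logs -f <pomerium-pod-name>
```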
6":1}}],["we",{"0":{"0":3,"2":4,"3":2,"4":1,"8":1,"11":1,"15":3,"22":2,"23":1,"28":6,"29":2,"32":4,"44":2,"45":1,"47":3,"52":1,"53":1,"56":9,"57":6,"61":13,"65":1,"67":2}}],["khtml",{"0":{"68":2}}],["kwds",{"0":{"63":2}}],["kwargs",{"0":{"63":5}}],["konnectivity",{"0":{"56":2}}],["kafka",{"0":{"49":4}}],["kim",{"0":{"56":1}}],["killed",{"0":{"43":1}}],["kinds",{"0":{"61":1}}],["kind=general",{"0":{"20":2}}],["kind",{"0":{"0":2,"2":1,"9":2,"26":1,"27":1,"45":2,"46":4,"62":3}}],["knowledge",{"0":{"61":1}}],["know",{"0":{"30":1,"34":1}}],["known",{"0":{"5":1,"15":1}}],["k4p5w",{"0":{"27":1}}],["k",{"0":{"14":1,"61":1}}],["kustomization",{"0":{"14":1}}],["kustomize",{"0":{"14":2,"62":1}}],["kubebuilder",{"0":{"61":2}}],["kuberlr",{"0":{"26":7}}],["kubernates",{"0":{"8":1}}],["kubernetes",{"0":{"0":3,"2":2,"4":1,"6":1,"8":3,"14":8,"15":4,"21":1,"26":4,"45":4,"46":5,"56":11,"61":6,"62":1}}],["kube",{"0":{"4":2,"8":2,"12":3,"14":4,"26":3,"27":3,"43":6,"56":6}}],["kubelogin",{"0":{"4":3,"12":1}}],["kubectl",{"0":{"2":3,"6":3,"8":10,"12":7,"14":13,"15":9,"17":1,"20":3,"26":3,"27":3,"38":2,"39":1,"45":10,"46":1,"49":6,"51":2,"54":1,"56":25,"57":2,"61":1}}],["kcc",{"0":{"23":1}}],["kctx=$",{"0":{"15":1}}],["kc",{"0":{"4":3}}],["ktlo",{"0":{"2":1}}],["kept",{"0":{"3":1,"10":1,"17":1}}],["keeps",{"0":{"61":1}}],["keep",{"0":{"2":1,"18":1,"40":2,"47":1,"52":1,"56":2,"62":1}}],["kenvue",{"0":{"1":3,"4":1,"7":2,"8":1,"11":6,"26":2,"48":1,"55":2,"56":6}}],["key2=value2",{"0":{"67":2}}],["keychain",{"0":{"57":1}}],["key=fill",{"0":{"53":2}}],["key1=value1",{"0":{"67":2}}],["key1",{"0":{"32":1}}],["keys",{"0":{"23":1,"41":3,"44":1,"57":3}}],["keyrings",{"0":{"41":1}}],["keyring",{"0":{"3":1,"15":2,"16":1}}],["key",{"0":{"0":3,"2":6,"5":1,"9":3,"14":1,"32":1,"33":3,"35":7,"41":8,"42":2,"46":5,"53":1,"57":24}}],["k8sadmin",{"0":{"4":1}}],["k8smonitor",{"0":{"4":1}}],["k8soperator",{"0":{"4":1}}],["k8s",{"0":{"0":3,"2":2,"9":3,"14":1,"20":5,"21":3,"26":1,"46":1,"61":3}}],["rsa",{"0":{"57":13}}],["rsync",{"0":{"39":2}}],["risk",{"0":{"56":1}}],["right",{"0":{"2":1,"22":2,"30":1,"40":1,"56":3}}],["rc",{"0":{"56":1}}],["rm",{"0":{"41":1,"65":1}}],["ruler",{"0":{"54":1}}],["rule",{"0":{"32":8}}],["rules",{"0":{"15":1,"32":2,"54":1}}],["ruff",{"0":{"25":2}}],["runtime",{"0":{"61":1}}],["runnner",{"0":{"46":1}}],["runner",{"0":{"46":2,"61":1}}],["running",{"0":{"10":1,"15":6,"28":1,"38":1,"43":1,"61":2,"63":1,"65":1}}],["runs",{"0":{"4":1,"42":1,"56":1,"58":1,"61":4}}],["run",{"0":{"2":1,"3":2,"4":1,"6":1,"14":1,"15":3,"20":2,"22":4,"23":3,"25":1,"30":5,"35":1,"37":1,"39":1,"42":1,"46":2,"50":3,"55":2,"56":6,"57":3,"58":1,"61":3,"63":12,"65":2}}],["rabbitmq",{"0":{"57":1}}],["rate",{"0":{"43":1}}],["rainbow",{"0":{"25":1}}],["raw",{"0":{"14":1,"63":2}}],["ran",{"0":{"4":1}}],["random",{"0":{"2":1}}],["ryan",{"0":{"7":1}}],["r5",{"0":{"2":1,"9":1}}],["rds",{"0":{"2":1,"9":1}}],["row",{"0":{"63":6}}],["rows",{"0":{"63":5}}],["rotation",{"0":{"33":1}}],["rotate",{"0":{"32":2}}],["rollout",{"0":{"27":1}}],["role=datacoves",{"0":{"18":1}}],["role",{"0":{"2":5,"4":2,"7":1,"26":1}}],["roles",{"0":{"2":2,"4":1,"7":5}}],["robertostermann",{"0":{"25":1}}],["route53",{"0":{"2":1}}],["root",{"0":{"0":3,"2":1,"9":3,"35":3,"56":1,"57":35,"61":1}}],["r",{"0":{"2":1,"3":1,"4":1,"6":1,"7":1,"17":1,"26":1,"30":3,"37":1,"50":1,"56":3}}],["rnd",{"0":{"1":1}}],["reinstalled",{"0":{"68":1}}],["returning",{"0":{"68":1}}],["returned",{"0":{"61":1,"64":1}}],["return",{"0":{"57":2,"63":15}}],["retain",{"0":{"45":5}}],["retried",{"0":{"61":2}}]
,["retrieve",{"0":{"6":1}}],["retrying",{"0":{"61":2}}],["retry",{"0":{"29":1,"56":2,"61":1}}],["regenerate",{"0":{"68":1}}],["regex",{"0":{"43":1}}],["regarding",{"0":{"57":2}}],["region=fill",{"0":{"53":1}}],["region",{"0":{"32":1,"33":1}}],["registrations",{"0":{"52":1}}],["registry",{"0":{"8":1,"11":1,"14":1,"15":1}}],["register",{"0":{"22":1,"52":2}}],["reusing",{"0":{"61":1}}],["reuse",{"0":{"41":1,"59":1}}],["reusable",{"0":{"40":1}}],["remember",{"0":{"29":1}}],["removing",{"0":{"22":1}}],["removed",{"0":{"57":1}}],["remove",{"0":{"3":1,"22":1,"23":3,"45":1,"56":1,"57":2,"63":1,"66":1,"68":1}}],["remote",{"0":{"1":1,"4":2}}],["referer",{"0":{"68":1}}],["references",{"0":{"55":1,"61":2}}],["reference",{"0":{"7":1,"22":1,"28":1,"30":1,"39":1,"42":1,"61":1,"62":1}}],["refs",{"0":{"34":2,"56":1}}],["refreshing",{"0":{"40":1}}],["refresh",{"0":{"23":1}}],["reflected",{"0":{"22":1}}],["re",{"0":{"22":2,"34":2,"49":2,"52":1,"56":3,"57":1,"61":1}}],["rename",{"0":{"12":2}}],["reached",{"0":{"54":1}}],["react",{"0":{"50":1}}],["real",{"0":{"25":1,"63":1,"68":1}}],["reassign",{"0":{"18":2}}],["reason=",{"0":{"43":1}}],["reason",{"0":{"7":3,"43":1,"49":1}}],["readable",{"0":{"61":1}}],["readinessprobe",{"0":{"56":2}}],["readme",{"0":{"56":1}}],["readonly",{"0":{"45":1}}],["readwriteonce",{"0":{"45":2}}],["ready",{"0":{"15":2,"34":1,"40":1}}],["readthedocs",{"0":{"13":1}}],["read",{"0":{"1":1,"2":3,"9":1,"28":1,"32":1,"52":1,"56":1,"57":1,"64":2}}],["related",{"0":{"58":1}}],["relatively",{"0":{"56":2}}],["relationship",{"0":{"13":1}}],["releasing",{"0":{"56":2}}],["release=",{"0":{"39":1}}],["release",{"0":{"11":1,"28":2,"34":13,"39":10,"40":2,"45":3,"47":14,"56":16,"59":7,"61":1,"67":5}}],["released",{"0":{"6":1}}],["releases",{"0":{"2":1,"14":1,"26":2,"28":3,"34":2,"47":9,"56":1,"59":3,"61":1,"67":1}}],["releae",{"0":{"56":1}}],["relevant",{"0":{"47":1}}],["reload",{"0":{"40":2}}],["rely",{"0":{"3":1}}],["revert",{"0":{"56":1}}],["reveal",{"0":{"3":1,"4":1,"6":2,"15":1,"16":2,"17":2,"39":1,"56":2,"57":2}}],["review",{"0":{"3":1,"16":1,"29":1,"32":1,"34":1,"39":1,"40":1,"61":1,"65":2}}],["revoke",{"0":{"2":1}}],["response",{"0":{"68":3}}],["responsible",{"0":{"33":1,"61":1}}],["responding",{"0":{"64":1}}],["respond",{"0":{"64":1}}],["resemble",{"0":{"57":1}}],["reset",{"0":{"49":2,"66":2}}],["resetting",{"0":{"49":1}}],["result=none",{"0":{"64":1}}],["result",{"0":{"30":1,"63":7}}],["results",{"0":{"30":1,"49":1,"61":1}}],["res",{"0":{"30":2}}],["resolution",{"0":{"68":1}}],["resolutions",{"0":{"62":1,"67":1}}],["resolve",{"0":{"27":1}}],["resouorce",{"0":{"45":1}}],["resourceversion",{"0":{"61":1}}],["resourcetype",{"0":{"56":1}}],["resourcegroups",{"0":{"12":1,"45":3}}],["resources",{"0":{"6":1,"10":1,"15":2,"17":2,"45":6,"61":12}}],["resource",{"0":{"2":4,"9":2,"12":2,"20":5,"23":2,"32":4,"45":9,"53":2,"61":9}}],["restarted",{"0":{"61":2}}],["restarting",{"0":{"56":1}}],["restart",{"0":{"27":1}}],["restarts",{"0":{"15":2,"27":1}}],["rest",{"0":{"18":1}}],["restore",{"0":{"18":2,"56":3}}],["restrictions",{"0":{"2":1}}],["resides",{"0":{"2":1}}],["repeatedly",{"0":{"61":1}}],["repeate",{"0":{"18":1}}],["replace",{"0":{"49":1,"56":1,"57":1}}],["replacing",{"0":{"2":2,"53":1}}],["replicated",{"0":{"8":2}}],["repositories",{"0":{"2":2,"8":2,"17":1,"44":2,"46":1,"56":1}}],["repository",{"0":{"2":3,"3":3,"5":2,"6":1,"7":1,"10":3,"11":1,"15":2,"17":5,"28":1,"33":1,"34":1,"39":1,"44":4,"46":2,"56":9,"57":1,"67":2}}],["repos",{"0":{"1":1,"2":8,"3":2,"15":1,"41":1}}],["repo",{"0":{"1":3,"2":9,"3":3,"
4":1,"5":1,"15":5,"16":1,"21":1,"28":1,"37":1,"39":2,"42":3,"44":1,"55":1,"67":2}}],["redis",{"0":{"56":4}}],["redirects",{"0":{"22":1}}],["redirect",{"0":{"2":1,"52":1}}],["redhat",{"0":{"25":1}}],["redundancy",{"0":{"0":1}}],["recent",{"0":{"63":1}}],["recently",{"0":{"22":1}}],["received",{"0":{"23":1,"57":2}}],["receives",{"0":{"22":2,"61":1}}],["receive",{"0":{"2":2,"5":2,"56":1,"57":1,"64":1}}],["recomputes",{"0":{"61":1}}],["recommend",{"0":{"0":2,"2":1}}],["recommended",{"0":{"0":2,"8":1,"14":1,"56":1,"57":2}}],["recover",{"0":{"45":1}}],["reconciliaton",{"0":{"61":1}}],["reconciliation",{"0":{"61":8}}],["reconciler",{"0":{"61":1}}],["reconcilers",{"0":{"61":2}}],["reconcile",{"0":{"61":6}}],["reconfiguration",{"0":{"28":3}}],["reconnect",{"0":{"4":1}}],["record",{"0":{"2":1,"22":1,"62":1}}],["requests",{"0":{"45":1,"54":2,"56":1,"61":2}}],["requesting",{"0":{"4":1,"7":3}}],["requested",{"0":{"2":1}}],["request",{"0":{"1":3,"2":5,"4":4,"5":2,"7":11,"45":1,"48":1,"58":1,"68":4}}],["requirement",{"0":{"39":1}}],["requirements",{"0":{"0":2,"1":3,"2":4,"3":1,"4":1,"6":1,"9":3,"10":1,"16":1,"17":1,"37":2,"40":5,"50":1,"56":2,"58":1}}],["require",{"0":{"2":1,"4":1,"15":1,"24":1,"27":2,"28":4,"52":1,"57":1}}],["requires",{"0":{"2":4,"12":1,"28":1,"56":1}}],["required",{"0":{"0":1,"2":3,"3":2,"6":3,"10":1,"15":2,"16":1,"17":1,"22":1,"39":4,"61":2}}],["441",{"0":{"63":1}}],["443",{"0":{"11":1,"68":1}}],["46",{"0":{"63":1}}],["474a",{"0":{"68":2}}],["472",{"0":{"63":1}}],["47",{"0":{"57":1}}],["47h",{"0":{"15":1}}],["48",{"0":{"57":2}}],["4c",{"0":{"57":1}}],["4f",{"0":{"57":1}}],["4ff4",{"0":{"56":1}}],["4fbd",{"0":{"45":1}}],["49c64bd87cbf",{"0":{"45":1}}],["400",{"0":{"63":2}}],["408",{"0":{"63":1}}],["40e9",{"0":{"45":1}}],["409d",{"0":{"45":2}}],["417e",{"0":{"26":1}}],["41cca1b4822b",{"0":{"20":1,"45":1}}],["4b21",{"0":{"26":2}}],["4xlarge",{"0":{"24":1}}],["42c9",{"0":{"20":1,"45":1}}],["4ed9",{"0":{"12":2}}],["4",{"0":{"0":3,"2":1,"11":1,"18":1,"26":3,"40":1,"42":1,"56":2,"67":1}}],["num",{"0":{"63":1}}],["number",{"0":{"23":1,"24":3,"56":1,"57":4,"59":1}}],["null",{"0":{"56":2}}],["nitro",{"0":{"24":2}}],["ns=",{"0":{"38":1}}],["nslookup",{"0":{"27":2}}],["ns",{"0":{"4":1,"12":1,"14":1}}],["navigating",{"0":{"23":1}}],["navigate",{"0":{"5":1,"7":1,"14":1,"22":1,"40":2,"48":1,"52":3}}],["na",{"0":{"4":1}}],["naming",{"0":{"2":1}}],["name=fill",{"0":{"53":1}}],["name=yarn",{"0":{"50":1}}],["name=",{"0":{"21":2}}],["name=emeadev",{"0":{"15":1}}],["named",{"0":{"12":1,"15":2,"55":1,"57":1}}],["names",{"0":{"3":2,"42":1,"45":1,"61":1}}],["namespace=",{"0":{"14":1,"29":4,"43":2}}],["namespaces",{"0":{"8":2,"14":2}}],["namespace",{"0":{"2":1,"8":3,"15":5,"27":1,"29":4,"38":1,"43":1,"45":5,"54":1,"56":3,"61":1,"67":3}}],["name",{"0":{"2":13,"4":3,"7":1,"9":2,"12":2,"14":1,"15":9,"18":1,"20":6,"22":1,"23":1,"26":6,"27":4,"29":2,"30":1,"32":9,"33":4,"34":5,"39":1,"43":1,"45":14,"52":1,"53":5,"56":5,"57":3,"59":1,"62":1,"67":1}}],["nginx",{"0":{"2":2,"6":2,"15":1,"17":1}}],["n",{"0":{"2":1,"8":8,"14":4,"15":6,"27":3,"38":2,"45":5,"49":6,"54":1,"56":41,"67":6}}],["nocerts",{"0":{"57":2}}],["nocrl",{"0":{"57":2}}],["nokeys",{"0":{"57":4}}],["nov",{"0":{"57":2}}],["noout",{"0":{"35":1,"57":8}}],["normal",{"0":{"34":2}}],["none",{"0":{"34":2}}],["non",{"0":{"25":1,"57":1}}],["nonce",{"0":{"13":1}}],["no",{"0":{"2":1,"4":2,"18":1,"25":1,"34":1,"37":1,"49":1,"56":4,"57":1,"67":2}}],["notion",{"0":{"56":1}}],["notifications",{"0":{"23":1}}],["notification",{"0":{"22":1,"23":1}}],["nothing",{"0":{"56":
1,"61":1}}],["notes",{"0":{"33":1,"39":1,"47":13,"57":2}}],["note",{"0":{"4":1,"7":1,"8":1,"12":2,"22":3,"29":1,"52":1,"56":6,"57":6}}],["not",{"0":{"2":4,"3":3,"6":1,"8":1,"12":2,"22":3,"25":2,"28":2,"29":1,"33":1,"34":2,"39":1,"40":1,"41":1,"42":1,"46":1,"47":3,"49":1,"55":1,"56":7,"57":15,"61":3,"62":2,"64":2,"67":1,"68":2}}],["now",{"0":{"1":1,"12":1,"20":1,"22":2,"34":2,"54":1,"56":4,"57":3}}],["noel",{"0":{"1":3}}],["noderesourcegroup",{"0":{"45":1}}],["node=",{"0":{"43":1}}],["nodepool",{"0":{"20":6}}],["nodepools",{"0":{"20":3}}],["nodegroup",{"0":{"0":2,"9":2,"20":2}}],["nodes",{"0":{"0":6,"9":6,"26":1,"43":2,"57":2}}],["node",{"0":{"0":2,"20":1,"24":2,"30":1,"43":14,"45":7,"54":1,"56":1}}],["never",{"0":{"56":1}}],["necessarily",{"0":{"44":1}}],["necessary",{"0":{"7":1,"12":1,"15":1,"45":2,"47":1,"56":3,"57":1}}],["net",{"0":{"33":1}}],["network",{"0":{"12":1,"24":1,"64":1}}],["networking",{"0":{"2":1}}],["near",{"0":{"25":1}}],["negotiated",{"0":{"22":1}}],["next",{"0":{"22":1,"56":1,"57":1}}],["needs",{"0":{"14":1,"22":1,"27":1,"56":1}}],["needed",{"0":{"2":1,"3":1,"6":1,"7":1,"17":1,"34":1,"39":1,"56":3,"57":2}}],["need",{"0":{"0":1,"2":9,"3":3,"4":2,"5":2,"7":2,"8":1,"15":2,"16":1,"18":2,"20":1,"22":5,"28":1,"29":1,"32":1,"34":2,"40":2,"44":2,"45":1,"49":1,"52":1,"56":11,"57":3,"59":3,"65":1,"66":2,"67":1}}],["newer",{"0":{"56":1,"63":1}}],["new",{"0":{"0":1,"1":1,"2":2,"4":1,"5":2,"7":1,"8":2,"9":1,"11":4,"12":4,"15":5,"18":1,"20":4,"22":3,"27":1,"28":2,"32":5,"34":2,"39":4,"40":3,"41":1,"42":3,"45":6,"46":1,"47":2,"52":3,"54":1,"56":7,"57":2,"58":3,"59":3,"61":2,"67":1}}],["640",{"0":{"63":1}}],["64",{"0":{"57":1}}],["62",{"0":{"57":1}}],["65",{"0":{"57":1}}],["68",{"0":{"57":1,"63":3}}],["68132",{"0":{"23":2}}],["67cf0ab43499",{"0":{"45":1}}],["6rvhd",{"0":{"15":1}}],["600",{"0":{"4":1}}],["6",{"0":{"0":1,"37":1,"42":1}}],["eg",{"0":{"64":1}}],["egg",{"0":{"56":1}}],["e5",{"0":{"57":1}}],["e6",{"0":{"57":1}}],["e8",{"0":{"57":1}}],["e3",{"0":{"57":1}}],["equivalent",{"0":{"61":1}}],["equals",{"0":{"56":1}}],["eqqmwqdt11mtlj3feojglrymfo6tit",{"0":{"57":1}}],["edeploying",{"0":{"56":1}}],["editor",{"0":{"56":3}}],["edited",{"0":{"40":1}}],["editing",{"0":{"16":1,"25":1,"40":1}}],["edit",{"0":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1,"7":1,"8":1,"9":1,"10":1,"11":1,"12":1,"13":1,"14":1,"15":1,"16":1,"17":1,"18":1,"19":1,"20":1,"21":1,"22":2,"23":1,"24":1,"25":2,"26":1,"27":1,"28":1,"29":1,"30":1,"31":2,"32":1,"33":1,"34":1,"35":1,"36":1,"37":1,"38":1,"39":1,"40":2,"41":1,"42":1,"43":1,"44":1,"45":6,"46":1,"47":2,"48":1,"49":1,"50":1,"51":1,"52":1,"53":1,"54":1,"55":1,"56":3,"57":2,"58":1,"59":1,"60":1,"61":1,"62":1,"63":1,"64":1,"65":1,"66":1,"67":3,"68":1,"69":1}}],["eval",{"0":{"56":2}}],["ever",{"0":{"56":1}}],["everything",{"0":{"2":1,"28":1,"57":1}}],["every",{"0":{"2":2,"15":2,"23":1,"24":1,"28":1,"40":1,"42":1,"62":1}}],["even",{"0":{"40":1}}],["eventhandler",{"0":{"54":1}}],["event",{"0":{"49":1}}],["eventually",{"0":{"28":1}}],["events",{"0":{"22":1,"23":1,"46":1}}],["eugine",{"0":{"56":1}}],["eye",{"0":{"56":1}}],["errorcode",{"0":{"30":1}}],["errors",{"0":{"30":1,"61":1}}],["error",{"0":{"29":1,"56":2,"57":3,"61":1,"64":2,"68":2}}],["erd",{"0":{"13":1}}],["eof",{"0":{"26":2}}],["estimating",{"0":{"24":1}}],["echo",{"0":{"8":2,"46":2}}],["ec2",{"0":{"4":1,"24":3}}],["elasticsearch",{"0":{"49":3}}],["elasticache",{"0":{"49":1}}],["elsewhere",{"0":{"44":1,"57":1}}],["else",{"0":{"28":1}}],["elb",{"0":{"6":1,"24":1}}],["elt",{"0":{"4":1}}],["electronic",{"0":{"2":1}}],
["ep",{"0":{"4":1}}],["easiest",{"0":{"56":1}}],["easy",{"0":{"41":1,"56":1}}],["eastus",{"0":{"45":1}}],["east",{"0":{"2":3,"18":2,"20":6,"26":1,"33":1,"45":1,"56":2}}],["earlier",{"0":{"22":1}}],["eat",{"0":{"7":1,"8":1}}],["each",{"0":{"1":1,"2":2,"3":2,"8":1,"9":2,"10":1,"15":1,"17":1,"24":1,"25":1,"56":3,"61":6}}],["ef50501e3a41",{"0":{"12":2}}],["effective",{"0":{"61":1}}],["effect",{"0":{"2":5,"9":2,"32":2,"53":2}}],["efs",{"0":{"2":6,"3":2,"9":1,"16":1}}],["empty",{"0":{"7":1}}],["email=",{"0":{"14":1}}],["email",{"0":{"2":2,"14":1,"22":3,"35":1,"41":1}}],["emeaelmdmprdtidmdmextractionve",{"0":{"29":1}}],["emea",{"0":{"2":2,"7":1}}],["etc",{"0":{"2":4,"4":2,"6":1,"7":1,"12":1,"17":1,"27":1,"46":2,"53":1,"64":1,"65":1}}],["e",{"0":{"2":5,"4":1,"6":2,"15":1,"18":1,"28":2,"29":2,"30":2,"32":2,"44":1,"45":11,"46":1,"57":3,"59":1}}],["export",{"0":{"41":1}}],["expire",{"0":{"32":2,"57":2}}],["explore",{"0":{"29":3}}],["extract",{"0":{"57":1}}],["extraction",{"0":{"29":1}}],["extra",{"0":{"34":1}}],["extension",{"0":{"25":3}}],["extensions",{"0":{"13":1,"25":2,"40":2,"56":3}}],["external",{"0":{"0":1,"2":3,"3":1,"6":2,"9":1,"15":1,"18":3,"44":1,"68":1}}],["exception=oserror",{"0":{"64":1}}],["exceeding",{"0":{"43":1}}],["excel",{"0":{"25":1}}],["excerpt",{"0":{"23":1}}],["exact",{"0":{"40":1}}],["exactly",{"0":{"22":1}}],["examples",{"0":{"29":2,"49":1}}],["example",{"0":{"2":2,"4":1,"7":1,"9":3,"11":1,"15":1,"21":1,"22":1,"24":1,"32":1,"33":1,"54":1,"56":1,"57":2,"62":1}}],["exist",{"0":{"49":1,"56":2}}],["existing",{"0":{"10":1,"15":1,"17":1,"20":2,"34":1,"56":1}}],["exists",{"0":{"8":1,"45":1,"67":1}}],["exit",{"0":{"4":1,"56":1,"61":3,"63":1,"64":1}}],["execution",{"0":{"61":1}}],["execute",{"0":{"56":2,"63":3}}],["executed",{"0":{"23":1}}],["executor",{"0":{"21":1}}],["executors",{"0":{"7":1}}],["exec",{"0":{"15":1,"23":1,"26":1,"27":1,"38":1,"54":1,"56":1,"65":1}}],["exe",{"0":{"4":1}}],["eksctl",{"0":{"14":9}}],["eks",{"0":{"2":8,"4":1,"9":1,"10":2,"14":11,"15":3,"17":2,"26":2,"62":3}}],["engaging",{"0":{"56":1}}],["engineers",{"0":{"2":1}}],["engine",{"0":{"2":1,"9":1,"63":6}}],["enjoy",{"0":{"40":1}}],["enhancements",{"0":{"28":1,"47":2}}],["enis",{"0":{"24":1}}],["eni",{"0":{"24":1}}],["endpoint",{"0":{"33":1}}],["endpoints",{"0":{"2":1}}],["ends",{"0":{"22":1}}],["end",{"0":{"22":2,"40":1,"56":2,"57":4}}],["encrypt",{"0":{"15":1,"35":1,"57":1}}],["encrypted",{"0":{"3":1,"15":1,"16":1}}],["en",{"0":{"13":1}}],["entire",{"0":{"57":1}}],["entity",{"0":{"13":1}}],["entry",{"0":{"56":1,"61":1,"66":1}}],["entra",{"0":{"52":1}}],["entries",{"0":{"0":1,"23":1}}],["enters",{"0":{"22":1}}],["enter",{"0":{"7":1,"15":1,"56":1,"57":1}}],["envoy",{"0":{"68":1}}],["environmet",{"0":{"29":1}}],["environments",{"0":{"3":1,"16":2,"23":1,"30":1,"40":1,"56":1,"57":1,"62":2,"68":1}}],["environment",{"0":{"2":3,"7":1,"9":2,"14":1,"21":1,"22":1,"26":1,"29":3,"33":1,"40":2,"49":3,"53":1,"56":3,"58":1,"61":1,"62":3,"64":1}}],["enviornment",{"0":{"26":1}}],["envs",{"0":{"25":2}}],["env=",{"0":{"21":1}}],["env",{"0":{"3":2,"16":2,"42":1,"52":1,"53":1,"57":3}}],["enable",{"0":{"2":2,"5":2,"9":1,"20":1,"42":1,"49":1,"56":1}}],["enabled",{"0":{"0":2,"2":1,"9":1,"22":1,"24":1,"29":1,"56":1}}],["ens2",{"0":{"29":1}}],["ensure",{"0":{"2":1,"4":1,"5":2,"12":1,"61":1}}],["ensembledev",{"0":{"15":3,"24":1}}],["ensembletest",{"0":{"2":1,"6":1,"53":1}}],["ensemble",{"0":{"1":2,"2":8,"6":1,"7":2,"9":2,"15":1,"17":2,"53":1}}],["either",{"0":{"0":1,"2":2,"34":1}}],["1816",{"0":{"63":1}}],["187",{"0":{"4":1}}],["13",{"
0":{"68":2}}],["1335",{"0":{"63":1}}],["138",{"0":{"4":2,"56":1}}],["1d",{"0":{"57":1}}],["1b5ea827f87280f98620dccc1600727c",{"0":{"56":1}}],["1x",{"0":{"56":2}}],["1e9",{"0":{"54":1}}],["1636",{"0":{"63":1}}],["16384",{"0":{"46":1}}],["160",{"0":{"27":1}}],["16",{"0":{"25":1,"67":1}}],["15000",{"0":{"68":1}}],["150",{"0":{"27":1}}],["15",{"0":{"25":1,"57":1,"68":2}}],["157",{"0":{"4":2,"56":1}}],["115",{"0":{"68":2}}],["1151",{"0":{"63":1}}],["11",{"0":{"42":1,"57":1}}],["110",{"0":{"24":2}}],["1134",{"0":{"23":1}}],["113",{"0":{"23":1}}],["1pswd",{"0":{"12":1}}],["1password",{"0":{"11":1,"12":2,"35":1,"55":1}}],["127",{"0":{"68":1}}],["120",{"0":{"63":1}}],["1243",{"0":{"63":1}}],["1211",{"0":{"23":2}}],["12",{"0":{"11":3,"57":5}}],["123",{"0":{"4":1}}],["125ea29c302df7dbb900ed84aa85f0bb",{"0":{"2":3}}],["128",{"0":{"0":4}}],["17t13",{"0":{"68":2}}],["172",{"0":{"27":2}}],["17",{"0":{"2":3,"9":1,"32":1,"53":1}}],["1024",{"0":{"46":1}}],["100",{"0":{"8":1,"43":1}}],["100gb",{"0":{"2":1,"9":1}}],["10",{"0":{"2":3,"4":3,"9":1,"12":26,"20":1,"25":3,"27":1,"32":1,"42":1,"45":4,"53":1,"56":2,"68":9}}],["1tb",{"0":{"2":1,"9":1}}],["1960d75beac7",{"0":{"56":1}}],["19",{"0":{"2":1,"3":1}}],["1",{"0":{"0":4,"1":1,"2":4,"3":1,"5":1,"9":3,"11":1,"12":2,"15":3,"20":1,"22":3,"23":8,"24":5,"25":7,"26":1,"28":4,"33":1,"40":2,"42":1,"46":1,"54":1,"56":43,"57":4,"65":1,"67":1,"68":1}}],["14",{"0":{"0":1,"2":1,"9":1,"25":1}}],["b1",{"0":{"57":1}}],["bb",{"0":{"57":1}}],["b0",{"0":{"57":1}}],["b9c5",{"0":{"56":1}}],["b631",{"0":{"45":1}}],["b302",{"0":{"45":1}}],["b56a",{"0":{"45":1}}],["blocked",{"0":{"66":1}}],["block",{"0":{"57":2}}],["blocks",{"0":{"56":2,"57":2}}],["blobs",{"0":{"32":1}}],["blob",{"0":{"32":4,"33":3,"56":2}}],["blank",{"0":{"44":1}}],["blue",{"0":{"42":1}}],["b",{"0":{"34":1}}],["b818",{"0":{"26":2}}],["bd0f1a58014fcf446b668a876ee7df2a",{"0":{"26":1}}],["bit",{"0":{"56":1}}],["bitbucket",{"0":{"2":1,"5":6,"7":1,"42":1}}],["bi",{"0":{"42":2}}],["bin",{"0":{"26":4,"38":1,"46":4,"56":3,"63":1,"65":1}}],["billing",{"0":{"22":8,"23":4}}],["bottom",{"0":{"56":1}}],["both",{"0":{"2":1,"40":1,"52":2,"56":1,"57":1}}],["book",{"0":{"61":1}}],["boost",{"0":{"46":1}}],["bootstrap",{"0":{"37":1}}],["body",{"0":{"45":1}}],["bore",{"0":{"25":1}}],["box",{"0":{"7":1,"56":1}}],["bunch",{"0":{"57":1}}],["business",{"0":{"42":2,"56":1}}],["built",{"0":{"34":1,"47":1}}],["build",{"0":{"5":1,"11":2,"12":1,"15":1,"34":5,"40":3,"42":1,"59":3,"61":1}}],["bug",{"0":{"28":1,"47":1}}],["bump",{"0":{"28":2}}],["buckets",{"0":{"9":1}}],["bucket",{"0":{"2":21,"9":5,"32":3,"33":2,"53":10}}],["button",{"0":{"7":1,"12":1,"22":1,"25":1,"40":1,"52":1}}],["but",{"0":{"0":2,"3":1,"25":2,"28":2,"39":1,"47":2,"49":1,"56":5,"61":3,"64":1,"67":1,"68":1}}],["bad",{"0":{"61":1,"68":1}}],["bag",{"0":{"57":1}}],["bat",{"0":{"56":1}}],["baltocdn",{"0":{"46":2}}],["balboa",{"0":{"42":1}}],["balancer",{"0":{"2":2}}],["back",{"0":{"49":2,"56":7,"57":2}}],["background",{"0":{"40":1,"61":2}}],["backend",{"0":{"23":1}}],["bar",{"0":{"25":1}}],["bank",{"0":{"22":4}}],["basic",{"0":{"58":1,"65":1}}],["basically",{"0":{"56":1}}],["basis",{"0":{"21":1}}],["bash",{"0":{"15":1,"23":1,"38":2,"54":1,"56":2,"65":3}}],["bashrc",{"0":{"4":1,"56":2}}],["bastions",{"0":{"56":1}}],["bastion",{"0":{"1":2,"4":1,"26":2,"37":1,"55":1,"56":8}}],["base64",{"0":{"8":2,"41":3}}],["based",{"0":{"2":1,"10":1,"15":1,"17":1,"40":1,"52":1,"56":1}}],["base",{"0":{"0":2,"6":2,"17":2,"21":1,"35":4,"56":1,"57":5,"62":2,"63":1}}],["bhe",{"0":{"2":1}}],["breaking",{"0":
{"47":2}}],["brew",{"0":{"14":2,"56":5}}],["bru",{"0":{"45":4}}],["bring",{"0":{"34":1}}],["brand",{"0":{"22":1}}],["branch",{"0":{"2":1,"5":1,"34":9,"42":5,"47":1,"55":2,"56":2,"59":1}}],["broker",{"0":{"49":1}}],["broad",{"0":{"15":1}}],["browseresource",{"0":{"56":1}}],["browse",{"0":{"1":1,"2":8,"3":1}}],["bytes",{"0":{"43":4}}],["by",{"0":{"1":2,"2":4,"3":1,"7":2,"8":1,"15":1,"18":2,"22":7,"23":2,"24":1,"28":2,"29":2,"40":2,"43":10,"47":1,"49":1,"52":1,"56":2,"61":6,"62":2,"63":1}}],["best",{"0":{"61":1}}],["beat",{"0":{"56":4}}],["bec",{"0":{"43":1}}],["because",{"0":{"2":1,"24":1,"56":1,"57":1}}],["behave",{"0":{"49":1}}],["behaviors",{"0":{"5":1}}],["behind",{"0":{"34":1}}],["better",{"0":{"25":1}}],["between",{"0":{"22":2,"56":2,"57":1,"61":2}}],["being",{"0":{"16":1,"25":1,"28":1,"40":1,"53":1}}],["begins",{"0":{"22":2}}],["begin",{"0":{"10":1,"47":1,"57":6}}],["been",{"0":{"2":1,"8":1,"17":1,"23":1,"25":1}}],["before",{"0":{"2":1,"7":1,"10":1,"15":1,"47":1,"56":3,"57":4,"59":1,"61":2}}],["below",{"0":{"2":3,"3":1,"29":1,"53":1,"57":1}}],["be",{"0":{"0":4,"1":1,"2":8,"3":4,"4":1,"5":3,"9":1,"10":2,"12":2,"15":5,"16":2,"17":1,"18":3,"20":1,"22":3,"23":1,"24":3,"28":1,"32":3,"33":1,"34":1,"39":1,"40":2,"45":3,"47":2,"49":1,"52":1,"53":1,"56":10,"57":16,"59":1,"61":11,"62":1,"67":1,"68":1}}],["md5",{"0":{"57":4}}],["md",{"0":{"47":1,"56":2,"62":1}}],["mdm",{"0":{"29":1}}],["mc",{"0":{"45":1}}],["mv",{"0":{"26":1}}],["mkdir",{"0":{"26":2,"39":1,"56":1}}],["ms",{"0":{"25":1}}],["m5",{"0":{"9":1,"24":3}}],["m",{"0":{"3":1,"4":2,"16":1,"56":1,"57":1}}],["mozilla",{"0":{"68":2}}],["moveresources",{"0":{"45":1}}],["move",{"0":{"41":1,"45":4}}],["mountpath",{"0":{"27":1}}],["mount",{"0":{"24":1,"56":1}}],["mounted",{"0":{"3":1,"10":1,"17":1}}],["monthly",{"0":{"22":1}}],["monitoring",{"0":{"23":2,"49":1}}],["monitor",{"0":{"12":1}}],["more",{"0":{"9":1,"13":1,"22":1,"23":1,"25":1,"30":1,"34":1,"40":1,"41":1,"45":1,"50":1,"57":1,"61":2,"67":2}}],["module",{"0":{"61":3,"63":1}}],["modulus",{"0":{"57":6}}],["modified",{"0":{"22":1,"39":3,"45":1,"57":1}}],["modifies",{"0":{"22":1}}],["modify",{"0":{"5":1,"20":1,"22":3}}],["modern",{"0":{"56":2}}],["mode",{"0":{"20":1,"51":3}}],["models",{"0":{"13":2,"63":3}}],["model",{"0":{"7":1,"22":1,"25":1,"61":1,"68":2}}],["most",{"0":{"3":1,"24":1,"56":1,"57":1,"61":3,"62":1,"63":1}}],["moments",{"0":{"56":1}}],["moment",{"0":{"2":1}}],["much",{"0":{"22":1,"56":1}}],["multiple",{"0":{"57":2,"61":1}}],["multibranch",{"0":{"5":1}}],["multi",{"0":{"2":2,"9":1}}],["must",{"0":{"0":4,"7":3,"10":2,"15":2,"17":1,"24":2,"32":6,"33":2,"34":2,"45":1,"46":1,"56":4,"57":1,"61":3}}],["miievaibadanbgkqhkig9w0baqefaascbkywggsiageaaoibaqclf9q17cqlowdb",{"0":{"57":1}}],["miiejtccavwgawibagiqq71eg0d4110tqpc8i8ur",{"0":{"57":1}}],["mirror",{"0":{"39":1}}],["mirrors",{"0":{"39":1}}],["missing",{"0":{"23":1}}],["migrating",{"0":{"18":1}}],["migrations",{"0":{"15":1,"56":4}}],["migrated",{"0":{"18":1}}],["migrate",{"0":{"15":1,"56":1}}],["might",{"0":{"2":2,"40":2,"59":1,"66":1}}],["mixin",{"0":{"13":1}}],["mimimize",{"0":{"2":1}}],["microsoft",{"0":{"1":1,"4":1,"12":1,"20":2,"45":1,"52":1,"56":1}}],["minavailable",{"0":{"56":16}}],["minute",{"0":{"40":1,"57":1}}],["minio",{"0":{"33":4}}],["minimum",{"0":{"0":1,"2":1,"9":1,"14":1,"34":1}}],["minor",{"0":{"28":6}}],["mind",{"0":{"2":1,"18":1}}],["min",{"0":{"0":3,"9":3,"20":1,"56":2}}],["message",{"0":{"68":2}}],["mechanism",{"0":{"61":1}}],["mechatroner",{"0":{"25":1}}],["meat",{"0":{"61":1}}],["measued",{"0":{"43":1}}],["mean
s",{"0":{"22":1,"45":1,"61":2}}],["memberships",{"0":{"52":1}}],["memtotal",{"0":{"43":1}}],["memavailable",{"0":{"43":1}}],["memory",{"0":{"43":6,"61":3}}],["memorypressure",{"0":{"43":1}}],["menu",{"0":{"25":1}}],["method",{"0":{"61":1,"68":1}}],["methods",{"0":{"61":1}}],["metadata",{"0":{"22":1,"27":1,"45":2,"49":2}}],["metered",{"0":{"22":2}}],["metrics",{"0":{"11":5,"14":3,"56":2}}],["medical",{"0":{"7":1}}],["merge",{"0":{"5":1,"34":1,"58":1}}],["me",{"0":{"1":1}}],["my",{"0":{"4":2,"11":3,"22":1,"29":5,"56":1}}],["myworkspaces",{"0":{"1":1}}],["mycompany",{"0":{"0":3}}],["matter",{"0":{"56":1}}],["mate",{"0":{"40":1}}],["matched",{"0":{"57":1}}],["matches",{"0":{"57":1}}],["match",{"0":{"22":1,"47":1,"57":3}}],["making",{"0":{"34":1,"56":1}}],["makes",{"0":{"56":1}}],["make",{"0":{"2":1,"3":2,"7":1,"15":1,"25":1,"28":2,"34":4,"39":2,"47":3,"49":1,"52":1,"56":8,"57":6,"59":1,"61":2,"63":2}}],["major",{"0":{"28":7}}],["mapping",{"0":{"57":1}}],["map",{"0":{"27":1}}],["macintosh",{"0":{"68":2}}],["macs",{"0":{"56":1}}],["mac",{"0":{"14":1,"68":2}}],["machine",{"0":{"3":1,"6":1,"15":1,"39":2,"41":2,"46":2,"56":1}}],["martin",{"0":{"7":1}}],["marked",{"0":{"16":1}}],["marks",{"0":{"3":1,"22":2}}],["mark",{"0":{"3":1}}],["may",{"0":{"3":1,"56":1,"57":2}}],["maintenance",{"0":{"51":3}}],["maintain",{"0":{"4":1,"12":1}}],["main",{"0":{"3":1,"7":1,"16":1,"25":1,"30":1,"34":2,"39":1,"46":2,"56":8,"57":1,"61":1,"63":3,"64":1}}],["mail",{"0":{"2":1}}],["master",{"0":{"2":4,"9":2,"18":1,"49":2}}],["manifest",{"0":{"34":1}}],["managing",{"0":{"2":1,"3":1,"14":2,"40":1,"61":1}}],["manages",{"0":{"15":1}}],["manage",{"0":{"5":1,"7":1,"13":1,"15":2,"20":1,"22":1,"47":1,"49":1,"54":1,"56":2,"62":1}}],["management",{"0":{"2":1,"32":2}}],["managed",{"0":{"2":4,"3":1,"8":1,"22":2}}],["manager",{"0":{"0":1,"2":3,"15":4,"54":1}}],["manually",{"0":{"2":2,"22":3,"42":2,"55":1,"56":1,"57":1}}],["manual",{"0":{"2":2,"61":1}}],["maxconcurrentreconciles",{"0":{"61":1}}],["maximum",{"0":{"2":1,"9":1,"24":2,"29":1}}],["max",{"0":{"0":3,"9":3,"20":1,"24":3,"46":3,"54":1,"56":2}}],["vulnerabilities",{"0":{"50":1}}],["v",{"0":{"29":1}}],["v0",{"0":{"26":2,"46":1}}],["vsc",{"0":{"25":1}}],["vscode",{"0":{"25":5}}],["vsix",{"0":{"25":13,"40":1}}],["vs",{"0":{"25":16,"56":1}}],["vm",{"0":{"20":1,"56":2}}],["vma",{"0":{"7":2}}],["v2",{"0":{"14":1,"49":1}}],["vpcxeksrole",{"0":{"26":1}}],["vpcx",{"0":{"5":2}}],["vpn",{"0":{"1":1,"12":5,"56":4,"57":1}}],["venv",{"0":{"56":3,"57":1}}],["vendor",{"0":{"4":1}}],["ve",{"0":{"16":1}}],["verified",{"0":{"57":1}}],["verification",{"0":{"57":1}}],["verifying",{"0":{"56":1}}],["verify",{"0":{"2":1,"12":1,"49":1,"56":2,"57":7}}],["very",{"0":{"12":1,"34":2,"49":1,"56":2,"67":1}}],["versioning",{"0":{"28":3}}],["versions",{"0":{"2":1,"18":1,"25":1,"32":2,"56":3,"65":2}}],["version",{"0":{"0":2,"2":6,"9":2,"11":1,"15":2,"24":4,"28":4,"32":1,"34":1,"39":1,"53":1,"56":14,"57":5,"58":2,"61":2,"63":1,"64":1,"65":1,"67":2}}],["vary",{"0":{"57":1}}],["varies",{"0":{"56":2}}],["variant",{"0":{"22":2}}],["variables",{"0":{"21":1,"52":1,"53":1}}],["variable",{"0":{"3":1,"64":1}}],["vars",{"0":{"25":1}}],["validating",{"0":{"44":1}}],["validations",{"0":{"25":1}}],["validated",{"0":{"44":1}}],["validate",{"0":{"42":1}}],["validity",{"0":{"22":1,"57":4}}],["valid",{"0":{"7":2,"8":1}}],["valueerror",{"0":{"63":1}}],["value=",{"0":{"21":1}}],["values",{"0":{"3":3,"16":2,"32":3,"46":2,"54":1,"61":3,"67":8,"68":2}}],["value",{"0":{"0":3,"3":1,"8":3,"9":3,"27":2,"52":1,"61":2,"63":1,"67":1}}],["v1be
ta1",{"0":{"26":1}}],["v1envvar",{"0":{"21":1}}],["v1container",{"0":{"21":1}}],["v1podspec",{"0":{"21":1}}],["v1pod",{"0":{"21":1}}],["v1",{"0":{"2":1,"3":1,"14":3,"26":1,"27":1,"45":2,"54":1,"56":1,"61":1}}],["virtual",{"0":{"46":1}}],["virtualnetworkgateways",{"0":{"12":1}}],["visibility",{"0":{"18":1}}],["vi",{"0":{"4":1,"57":2,"67":1}}],["view",{"0":{"56":1}}],["viewers",{"0":{"7":1}}],["viewer",{"0":{"2":1}}],["viewpage",{"0":{"1":1}}],["via",{"0":{"0":1,"2":1,"12":1,"22":1,"56":1}}],["volumename",{"0":{"45":1}}],["volumehandle",{"0":{"45":1}}],["volumemounts",{"0":{"27":1}}],["volumes",{"0":{"23":1,"24":3,"27":1,"45":4,"49":1}}],["volume",{"0":{"0":3,"3":1,"9":3,"16":1,"24":3,"45":11,"49":1,"56":1}}],["volumed",{"0":{"0":4,"2":1,"9":3}}],["v3",{"0":{"0":2,"20":1,"56":2}}],["v5",{"0":{"0":2}}],["d2",{"0":{"57":1}}],["dh",{"0":{"49":1}}],["drop",{"0":{"49":1}}],["draft",{"0":{"34":1}}],["driver",{"0":{"2":4,"3":2,"45":1}}],["dnr240",{"0":{"29":2}}],["dns",{"0":{"0":4,"2":4,"6":1,"9":1,"12":2,"15":2,"27":3,"35":4,"68":1}}],["django",{"0":{"13":1,"18":1,"22":2,"31":1,"44":1,"50":1}}],["dcmaster",{"0":{"18":2}}],["dcp",{"0":{"15":2}}],["dco",{"0":{"11":5}}],["dcw",{"0":{"2":1,"29":7,"38":1,"43":1,"45":5,"49":6,"56":4,"67":3}}],["duration",{"0":{"68":1}}],["during",{"0":{"3":1,"22":2}}],["duplicate",{"0":{"25":1}}],["dump",{"0":{"18":4}}],["due",{"0":{"2":1,"5":1,"61":2,"64":1}}],["dir",{"0":{"64":1}}],["directions",{"0":{"57":1}}],["directly",{"0":{"22":1,"34":3,"61":1}}],["directory",{"0":{"2":1,"4":2,"6":1,"10":1,"14":1,"15":3,"17":2,"34":1,"47":4,"52":1,"56":6,"57":6,"62":3}}],["dill",{"0":{"63":6}}],["did",{"0":{"62":1}}],["digicert",{"0":{"57":10}}],["dict",{"0":{"22":1}}],["diagram",{"0":{"13":1}}],["diffing",{"0":{"61":1}}],["difference",{"0":{"56":1}}],["different",{"0":{"7":1,"22":1,"33":1,"56":1,"57":1,"61":2,"68":1}}],["diff",{"0":{"3":1,"16":1,"34":1,"39":3}}],["disruption",{"0":{"56":2}}],["disruptions",{"0":{"2":1,"56":4}}],["distinction",{"0":{"61":1}}],["distinguish",{"0":{"22":1}}],["distributed",{"0":{"54":1}}],["disable",{"0":{"49":1,"56":3}}],["discounted",{"0":{"22":1}}],["displayed",{"0":{"15":1}}],["disks",{"0":{"45":1}}],["diskpressure",{"0":{"43":1}}],["disk",{"0":{"0":1,"45":10,"56":1}}],["division",{"0":{"2":5}}],["dbshell",{"0":{"49":1}}],["db",{"0":{"2":4,"3":5,"9":2,"16":4,"18":5,"57":1,"68":1}}],["dbtrunnerresult",{"0":{"30":2,"64":1}}],["dbtrunner",{"0":{"30":2,"64":3}}],["dbt",{"0":{"2":6,"3":1,"9":2,"12":2,"25":2,"28":4,"30":7,"42":7,"53":3,"56":3,"58":3,"64":10,"65":3}}],["dagrun",{"0":{"63":3}}],["dag",{"0":{"58":1,"63":7}}],["dags",{"0":{"2":6,"9":1,"56":1,"63":1}}],["days",{"0":{"32":5}}],["dashboard",{"0":{"5":1,"14":10}}],["dates",{"0":{"22":1}}],["date",{"0":{"3":1,"22":1,"29":1,"44":1,"57":3}}],["datasource",{"0":{"29":2}}],["datahubproject",{"0":{"67":1}}],["datahub",{"0":{"22":1,"28":3,"49":11,"67":8}}],["data",{"0":{"2":2,"3":1,"5":1,"8":1,"16":1,"18":2,"22":1,"26":1,"27":1,"32":1,"42":1,"49":2,"54":1,"56":1,"57":4}}],["datacove",{"0":{"2":2,"4":3,"26":1}}],["datacoveslocal",{"0":{"22":2,"33":1,"59":3,"62":2}}],["datacovesgateway",{"0":{"12":1}}],["datacoves",{"0":{"0":8,"1":1,"2":31,"3":9,"4":8,"6":6,"7":17,"9":3,"10":3,"12":22,"15":15,"16":3,"17":7,"18":14,"20":11,"22":16,"23":1,"25":17,"28":4,"37":1,"39":9,"42":1,"45":1,"46":2,"47":2,"51":1,"52":4,"53":2,"56":32,"57":16,"59":1,"61":1,"62":1,"65":3,"68":2}}],["databases",{"0":{"2":2,"18":1}}],["database",{"0":{"0":2,"2":10,"9":1,"15":1,"18":7,"61":1}}],["d",{"0":{"2":1,"7":1,"8":2,"18":3,"35"
:2,"41":2,"46":2,"50":1,"65":1}}],["dorzey",{"0":{"25":1}}],["doing",{"0":{"22":1,"34":1}}],["doesn",{"0":{"23":1,"57":2}}],["does",{"0":{"22":2,"56":2,"61":1,"64":1,"67":1,"68":1}}],["doubt",{"0":{"3":1}}],["downgrade",{"0":{"56":1}}],["down",{"0":{"22":1,"56":2}}],["downtimes",{"0":{"2":1}}],["downloaded",{"0":{"12":1,"47":2}}],["download",{"0":{"1":1,"2":1,"8":1,"11":3,"12":2,"14":1,"20":1,"26":2,"34":3,"47":1,"56":1}}],["done",{"0":{"2":1,"7":1,"17":1,"34":1,"38":1,"49":1,"52":1,"56":5,"57":2}}],["don",{"0":{"2":1,"5":1,"22":2,"34":4,"40":1,"44":1,"57":1}}],["do",{"0":{"2":3,"3":2,"7":1,"22":2,"23":1,"28":3,"29":1,"34":6,"38":1,"44":1,"46":1,"47":1,"52":1,"56":9,"57":2,"59":1,"62":2,"63":2,"67":2}}],["doc",{"0":{"46":1}}],["document",{"0":{"15":2,"22":1,"47":1,"56":3,"57":1}}],["documented",{"0":{"3":2,"8":1,"56":2}}],["documentation",{"0":{"2":1,"27":1,"39":1,"56":3,"61":1,"62":1}}],["dockerconfigjson",{"0":{"8":1}}],["docker",{"0":{"2":2,"4":2,"5":1,"7":1,"8":11,"14":8,"15":3,"28":1,"34":1,"46":3,"57":1,"58":2,"62":1,"65":6,"66":6}}],["docs",{"0":{"1":1,"2":3,"3":1,"12":2,"14":4,"21":1,"24":1,"30":1,"42":6,"56":9,"57":2,"62":3}}],["domain=ensembletest",{"0":{"17":1,"39":1}}],["domain=ensembledev",{"0":{"15":1}}],["domain=fill",{"0":{"6":1}}],["domain",{"0":{"0":6,"2":3,"3":3,"6":3,"10":1,"12":1,"15":6,"17":4,"23":2,"29":2,"33":1,"35":8,"39":2,"51":2,"52":1,"57":1,"62":1}}],["derived",{"0":{"61":2}}],["de",{"0":{"57":1}}],["delivered",{"0":{"47":1}}],["deliver",{"0":{"47":1}}],["delegated",{"0":{"52":1}}],["delegation",{"0":{"24":1}}],["deleted",{"0":{"25":1,"45":2,"49":2}}],["delete",{"0":{"8":2,"23":1,"32":3,"45":7,"47":2,"49":6,"56":1,"57":1,"63":1}}],["deleteobjectversion",{"0":{"2":2,"9":1,"32":1,"53":1}}],["deleteobject",{"0":{"2":2,"9":1,"32":1,"53":1}}],["deal",{"0":{"44":1}}],["debian",{"0":{"46":2}}],["deb",{"0":{"46":2}}],["debt",{"0":{"22":1}}],["debugging",{"0":{"64":1,"65":1}}],["debug",{"0":{"21":1,"29":1,"30":2,"35":2,"65":1}}],["detached",{"0":{"46":1}}],["detailed",{"0":{"23":1}}],["detail",{"0":{"4":1}}],["details",{"0":{"2":2,"4":1,"9":1,"32":1,"56":3,"68":1}}],["determine",{"0":{"52":1,"61":1}}],["determined",{"0":{"22":1}}],["detected",{"0":{"61":2}}],["detection",{"0":{"61":2}}],["detects",{"0":{"61":1}}],["detect",{"0":{"15":1}}],["decisions",{"0":{"63":2}}],["decrypt",{"0":{"15":2}}],["declined",{"0":{"5":1}}],["defaulted",{"0":{"32":3}}],["defaults",{"0":{"21":1}}],["default",{"0":{"8":6,"25":1,"40":1,"45":2,"56":3}}],["defined",{"0":{"34":1,"61":1}}],["define",{"0":{"2":2}}],["definitions",{"0":{"62":1}}],["definition",{"0":{"2":1}}],["deprecated",{"0":{"28":1}}],["deprecate",{"0":{"28":1}}],["dependency",{"0":{"34":1}}],["dependencies",{"0":{"5":1,"6":2,"17":2,"28":1,"34":1,"46":1}}],["depending",{"0":{"22":1,"28":1,"29":1,"42":1}}],["depend",{"0":{"12":1}}],["deps",{"0":{"12":1,"30":2}}],["deployed",{"0":{"15":2}}],["deploying",{"0":{"6":1,"15":4,"56":1}}],["deploy",{"0":{"3":2,"6":1,"14":1,"15":3,"39":1,"42":1,"56":2,"58":1}}],["deploys",{"0":{"2":1}}],["deployments",{"0":{"12":1,"15":2,"56":2,"61":1}}],["deployment",{"0":{"1":1,"3":5,"4":2,"6":2,"10":3,"12":1,"15":6,"17":3,"27":4,"37":1,"39":7,"56":4,"61":1}}],["destination",{"0":{"45":3}}],["desired",{"0":{"30":1,"47":1,"61":1}}],["describing",{"0":{"61":1}}],["describes",{"0":{"15":1,"45":1}}],["describe",{"0":{"4":1,"14":2,"47":1,"56":1,"57":1}}],["described",{"0":{"2":1,"22":1,"56":1}}],["descriptions",{"0":{"22":1}}],["description",{"0":{"2":2}}],["desktop",{"0":{"1":1,"4":2}}],["devs",{"0":{"59":1}}
],["dev001",{"0":{"38":1}}],["devops",{"0":{"20":1,"62":1}}],["devices",{"0":{"7":1}}],["devusr",{"0":{"5":1,"7":1}}],["dev123",{"0":{"2":2,"3":1,"12":9,"16":2,"18":1,"45":5,"49":1,"56":8,"67":6,"68":2}}],["developers",{"0":{"22":1}}],["developer",{"0":{"2":1,"7":9,"22":3,"62":1}}],["development",{"0":{"2":3,"26":1,"33":2,"39":1,"56":2}}],["dev",{"0":{"1":2,"2":2,"5":1,"7":9,"13":1,"56":1,"62":1}}],["d16s",{"0":{"0":1}}],["d4s",{"0":{"0":2,"20":1}}],["d4ds",{"0":{"0":1}}],["g2",{"0":{"57":5}}],["gmt",{"0":{"57":10}}],["g1",{"0":{"57":6}}],["gain",{"0":{"56":1}}],["gateway",{"0":{"54":1}}],["gave",{"0":{"46":1}}],["gnupg2",{"0":{"46":1}}],["gz",{"0":{"26":3}}],["gpg",{"0":{"41":6,"46":1}}],["gpt",{"0":{"25":1}}],["gp2",{"0":{"2":1,"9":1}}],["guitar",{"0":{"22":1}}],["guide",{"0":{"2":1,"3":1,"23":1,"45":1,"52":1}}],["global",{"0":{"5":1,"7":1,"57":5,"62":1}}],["gt",{"0":{"4":4,"5":7,"8":7,"11":2,"12":4,"14":3,"18":2,"22":1,"26":1,"28":1,"29":2,"30":1,"32":2,"33":5,"35":9,"41":1,"43":4,"45":14,"51":4,"56":1,"57":3,"59":1,"63":3,"64":9,"65":1,"67":12}}],["goroutines",{"0":{"61":3}}],["gone",{"0":{"49":1}}],["golang",{"0":{"46":1}}],["google",{"0":{"46":1}}],["good",{"0":{"34":2,"56":1}}],["got",{"0":{"22":1,"25":1,"30":1,"46":1}}],["going",{"0":{"11":1,"34":1,"40":1,"52":1,"56":2,"57":1}}],["go",{"0":{"4":1,"5":1,"7":2,"22":2,"27":1,"29":5,"31":2,"44":1,"46":3,"49":2,"52":1,"55":1,"56":4,"57":4,"61":12,"66":1}}],["g",{"0":{"2":2,"6":2,"13":1,"15":1,"28":2,"29":4,"32":2,"45":11,"46":1}}],["great",{"0":{"56":1}}],["green",{"0":{"42":1}}],["grep",{"0":{"3":1,"6":1,"14":2,"38":2,"45":1,"49":5}}],["gr7",{"0":{"26":1}}],["grid",{"0":{"25":1}}],["grace",{"0":{"61":1}}],["graph",{"0":{"13":2,"52":1}}],["graphviz",{"0":{"13":1}}],["grafana",{"0":{"2":2,"9":1,"12":1,"29":4,"31":3,"32":1,"33":4,"54":1}}],["granted",{"0":{"22":1,"52":1}}],["grants",{"0":{"2":1}}],["grant",{"0":{"2":5,"3":1,"4":1,"7":1,"18":2,"31":1}}],["group",{"0":{"2":2,"3":1,"4":1,"7":1,"12":1,"20":6,"31":2,"32":1,"45":8,"52":1,"54":1}}],["groups",{"0":{"0":1,"2":7,"4":2,"7":3,"9":1,"23":1,"31":1,"45":1,"52":4,"56":1}}],["growth",{"0":{"0":1,"22":3}}],["gives",{"0":{"64":1}}],["give",{"0":{"3":1}}],["gitsecret",{"0":{"57":1}}],["gitlab",{"0":{"42":1}}],["gitignore",{"0":{"42":1}}],["git",{"0":{"2":3,"3":16,"4":2,"6":5,"9":1,"10":1,"15":10,"16":11,"17":5,"28":1,"29":7,"34":6,"39":20,"41":1,"42":3,"44":1,"56":4,"57":10,"62":1}}],["githubusercontent",{"0":{"14":1}}],["github",{"0":{"0":1,"1":1,"2":1,"3":1,"4":1,"5":1,"6":1,"7":1,"8":1,"9":1,"10":1,"11":1,"12":1,"13":1,"14":2,"15":1,"16":1,"17":1,"18":1,"19":1,"20":1,"21":1,"22":1,"23":1,"24":1,"25":2,"26":3,"27":1,"28":2,"29":1,"30":1,"31":1,"32":1,"33":1,"34":3,"35":2,"36":1,"37":1,"38":1,"39":2,"40":1,"41":1,"42":2,"43":1,"44":1,"45":1,"46":5,"47":3,"48":1,"49":1,"50":1,"51":1,"52":1,"53":1,"54":1,"55":1,"56":11,"57":3,"58":1,"59":2,"60":1,"61":1,"62":1,"63":1,"64":1,"65":1,"66":1,"67":1,"68":1,"69":1}}],["gig",{"0":{"0":1}}],["gib",{"0":{"0":1}}],["gecko",{"0":{"68":2}}],["gentle",{"0":{"61":1}}],["generation",{"0":{"61":1}}],["generating",{"0":{"22":1,"47":1,"57":1}}],["generates",{"0":{"14":1}}],["generate",{"0":{"2":1,"14":1,"28":2,"34":1,"39":2,"42":1,"52":2,"57":1,"59":2,"67":1}}],["generated",{"0":{"2":1,"28":1,"39":1,"47":4}}],["generally",{"0":{"56":1}}],["generallarge",{"0":{"20":1}}],["general",{"0":{"0":5,"2":1,"9":3,"15":1,"32":1}}],["gets",{"0":{"49":1,"61":1}}],["getdbt",{"0":{"30":1}}],["getbucketlocation",{"0":{"2":2,"9":1,"53":1}}],["getobjectversion",{"0":{"2":2,"9":1
,"32":1,"53":1}}],["getobject",{"0":{"2":2,"9":1,"32":1,"53":1}}],["get",{"0":{"2":1,"4":2,"6":1,"7":1,"8":5,"12":5,"14":4,"15":4,"20":2,"26":1,"29":2,"32":1,"34":1,"35":1,"37":2,"38":1,"40":1,"45":17,"46":4,"49":3,"52":1,"56":18,"57":5,"58":1,"63":2,"65":1,"66":1,"67":3,"68":1}}],["getting",{"0":{"2":1,"35":1,"47":1}}],["geo",{"0":{"0":1}}],["ctrl",{"0":{"64":1}}],["c5",{"0":{"57":1}}],["c1",{"0":{"57":1}}],["c=us",{"0":{"57":7}}],["cb",{"0":{"57":1}}],["cbi",{"0":{"7":2}}],["cwwouzl4+aalfwj2pr+otupnjhci8stdedvmy5jtxskdal+5pgnu7zjbkfhbodgt",{"0":{"57":1}}],["c2lau2viyxn0awfucy1nywncb29rlvbyby5sb2nhbcaou2viyxn0awfuifnhc3np",{"0":{"57":1}}],["ccs",{"0":{"56":4}}],["ccc",{"0":{"2":1}}],["cfg",{"0":{"57":1}}],["cf",{"0":{"35":4,"57":1}}],["cycle",{"0":{"32":1}}],["cycles",{"0":{"2":1}}],["c6599969b",{"0":{"27":1}}],["csp",{"0":{"61":1}}],["csr",{"0":{"57":2}}],["cssperfusion",{"0":{"56":1}}],["csv",{"0":{"25":4}}],["csi",{"0":{"2":3,"3":1,"45":2}}],["cn",{"0":{"57":1}}],["cn=digicert",{"0":{"57":4}}],["cn=datacoves",{"0":{"57":1}}],["cn=",{"0":{"57":2}}],["cn=thawte",{"0":{"57":6}}],["cni",{"0":{"24":5}}],["cname",{"0":{"2":1}}],["cer",{"0":{"35":4,"57":31}}],["certfile",{"0":{"57":2}}],["certs",{"0":{"57":4}}],["certain",{"0":{"15":1,"22":1,"34":1,"38":1,"61":3,"62":1}}],["cert",{"0":{"0":1,"2":2,"15":1,"35":1,"57":2}}],["certificates",{"0":{"0":3,"2":2,"15":1,"52":1,"57":5}}],["certificate",{"0":{"0":2,"2":4,"26":1,"35":4,"37":1,"57":32}}],["ceftificates",{"0":{"35":1}}],["celery",{"0":{"23":5}}],["center",{"0":{"22":1}}],["crl2pkcs7",{"0":{"57":2}}],["criteria",{"0":{"28":2}}],["crds",{"0":{"15":1}}],["credits",{"0":{"22":2}}],["credit",{"0":{"22":7}}],["credential",{"0":{"5":2}}],["credentials",{"0":{"1":1,"3":2,"4":1,"5":2,"8":2,"11":1,"12":4,"14":2,"15":1,"18":1,"20":1,"44":1}}],["creating",{"0":{"14":2,"22":1,"56":1}}],["creation",{"0":{"2":1,"32":2}}],["creates",{"0":{"15":2,"22":5}}],["createrole",{"0":{"2":1,"18":1}}],["createdb",{"0":{"2":1,"18":1}}],["created",{"0":{"2":4,"3":1,"5":1,"10":2,"17":3,"22":2,"39":1,"40":1,"45":1,"46":1,"47":1,"52":1}}],["create",{"0":{"0":1,"2":21,"3":1,"4":3,"5":2,"6":1,"8":4,"13":1,"14":7,"15":5,"18":3,"20":1,"22":3,"30":1,"32":9,"34":3,"35":1,"40":6,"42":3,"45":4,"46":2,"53":5,"54":1,"56":2,"59":2,"62":2,"65":1}}],["c319",{"0":{"12":2}}],["ci",{"0":{"7":1,"15":1,"28":1,"42":5,"58":3,"65":1}}],["cpu",{"0":{"43":2}}],["cp",{"0":{"4":1,"56":1}}],["cd7203755386",{"0":{"68":2}}],["cd",{"0":{"3":2,"6":2,"15":1,"17":2,"26":5,"37":1,"39":4,"56":2,"57":1}}],["c",{"0":{"2":1,"4":1,"15":1,"38":1,"49":1,"61":1,"64":1,"65":2}}],["cursor",{"0":{"63":1}}],["curl",{"0":{"26":2,"46":2,"55":2,"57":2}}],["currently",{"0":{"2":1,"3":1,"24":1,"44":1,"47":1,"56":1,"61":1}}],["current",{"0":{"2":1,"15":2,"25":1,"26":1,"32":2,"46":1,"47":1,"58":1,"61":1}}],["cuts",{"0":{"25":1}}],["cust",{"0":{"7":2}}],["custom",{"0":{"5":1,"27":8,"57":1,"61":1}}],["customers",{"0":{"22":12,"27":1,"47":2,"57":1}}],["customer",{"0":{"1":1,"22":19,"28":1,"39":1,"47":4,"56":6,"57":2}}],["clcerts",{"0":{"57":2}}],["cluser",{"0":{"45":1}}],["clusters",{"0":{"1":2,"2":3,"4":1,"7":2,"14":2,"17":1,"23":1,"26":1,"55":1,"56":3}}],["cluster",{"0":{"0":2,"2":35,"3":8,"6":9,"8":2,"9":3,"10":6,"12":3,"14":8,"15":15,"17":9,"20":6,"22":1,"23":2,"26":5,"33":3,"39":4,"41":1,"45":11,"47":1,"51":3,"52":1,"53":1,"54":2,"55":2,"56":9,"57":2,"59":2,"62":12,"68":5}}],["claims",{"0":{"49":1}}],["claim",{"0":{"45":2,"49":1,"52":2}}],["class",{"0":{"2":1,"9":1}}],["cleanly",{"0":{"61":1}}],["cleaning",{"0":{"61":1}}
],["cleanup",{"0":{"23":1}}],["clear",{"0":{"23":1}}],["cliv2",{"0":{"14":1}}],["cli",{"0":{"6":2,"12":2,"13":1,"14":3,"15":5,"17":2,"20":1,"22":2,"23":1,"28":1,"30":4,"34":3,"39":5,"45":2,"47":4,"49":1,"51":2,"54":1,"56":1,"57":2,"59":4,"62":1,"63":5,"64":3}}],["clicking",{"0":{"22":2,"23":1,"40":1,"52":1}}],["click",{"0":{"4":1,"7":2,"12":1,"22":2,"32":7,"40":2,"52":2,"56":4,"66":1}}],["clients",{"0":{"62":1}}],["client",{"0":{"1":1,"2":3,"3":2,"12":4,"15":1,"16":2,"26":2,"52":7,"56":6,"57":1,"62":1}}],["close",{"0":{"46":1}}],["cloud",{"0":{"46":1,"48":1,"56":2}}],["cloudflare",{"0":{"35":1}}],["cloudx",{"0":{"2":7,"3":4,"17":1,"55":2}}],["clonning",{"0":{"4":1}}],["cloned",{"0":{"3":1,"37":1}}],["clone",{"0":{"3":3,"4":2,"6":2,"15":2,"17":2,"39":2,"56":1,"57":2}}],["chrome",{"0":{"68":2}}],["chicken",{"0":{"56":1}}],["chunks",{"0":{"63":1}}],["chunk",{"0":{"43":2}}],["cherrypick",{"0":{"34":1}}],["cherry",{"0":{"34":2}}],["checked",{"0":{"40":1,"47":1,"56":4,"57":1}}],["checker",{"0":{"25":1}}],["checkboxes",{"0":{"40":1}}],["checking",{"0":{"25":1,"61":2}}],["checks",{"0":{"5":1,"22":1}}],["checkout",{"0":{"3":1,"16":1,"34":1,"39":1,"56":1,"57":1}}],["check",{"0":{"1":2,"2":1,"8":3,"9":1,"11":2,"12":1,"15":2,"22":5,"23":1,"25":1,"32":2,"37":1,"39":3,"45":1,"50":1,"56":13,"57":5}}],["chown",{"0":{"56":1}}],["choosing",{"0":{"24":1,"52":1}}],["choose",{"0":{"2":2,"4":1,"31":1,"32":1,"40":2,"53":1,"56":1}}],["chosen",{"0":{"22":1}}],["chmod",{"0":{"4":1,"26":1}}],["channels",{"0":{"61":2}}],["channel",{"0":{"56":1,"61":1}}],["changing",{"0":{"34":1,"55":1}}],["changelog",{"0":{"47":1}}],["changed",{"0":{"18":1,"34":1,"55":1,"57":2,"59":1,"61":1,"68":1}}],["change",{"0":{"4":1,"6":1,"15":1,"27":1,"34":3,"39":2,"40":1,"42":1,"47":1,"59":1,"61":5,"67":1}}],["changes",{"0":{"2":1,"3":4,"6":1,"15":2,"16":3,"22":1,"28":6,"32":2,"39":3,"47":3,"56":1,"59":1,"61":6,"64":1}}],["chatgpt",{"0":{"25":1}}],["chat",{"0":{"25":1}}],["charliermarsh",{"0":{"25":1}}],["charts",{"0":{"11":1,"56":1,"61":1}}],["chart",{"0":{"11":10,"56":1,"61":2,"67":5}}],["characters",{"0":{"2":2}}],["chain",{"0":{"2":1,"57":1}}],["chapsbx",{"0":{"2":1}}],["chap",{"0":{"1":2,"2":3,"4":2}}],["cacerts",{"0":{"57":2}}],["cached",{"0":{"39":3}}],["ca",{"0":{"57":10}}],["callbacks=false",{"0":{"63":1}}],["callback",{"0":{"63":2}}],["calling",{"0":{"61":1}}],["called",{"0":{"61":1,"62":1}}],["call",{"0":{"57":1,"61":2,"63":1,"64":1}}],["calico",{"0":{"56":4}}],["calculator",{"0":{"24":3}}],["calculate",{"0":{"22":1}}],["capacity",{"0":{"45":1}}],["capped",{"0":{"24":1}}],["carry",{"0":{"61":1}}],["careful",{"0":{"57":1,"62":1}}],["carefully",{"0":{"34":1}}],["card",{"0":{"22":5}}],["cat",{"0":{"26":1,"41":1,"46":3,"57":2}}],["cautious",{"0":{"22":1}}],["caused",{"0":{"61":1}}],["cause",{"0":{"2":1,"56":1,"57":1,"68":1}}],["cases",{"0":{"22":1}}],["case",{"0":{"0":1,"11":1,"25":1,"56":1,"57":1,"61":3,"66":1}}],["cannot",{"0":{"57":1}}],["cant",{"0":{"25":1}}],["can",{"0":{"0":1,"2":4,"3":2,"5":1,"8":1,"9":1,"10":1,"12":3,"15":1,"20":1,"22":2,"23":4,"24":2,"27":1,"32":3,"34":5,"44":1,"47":3,"49":1,"54":1,"55":1,"56":16,"57":6,"59":2,"61":3,"62":1,"66":1}}],["coves",{"0":{"56":1,"58":2}}],["cover",{"0":{"34":1,"56":1,"57":1}}],["cooked",{"0":{"40":1}}],["copied",{"0":{"35":2,"68":1}}],["copy",{"0":{"4":1,"8":1,"22":1,"29":1,"34":1,"35":1,"40":1,"41":2,"56":4}}],["co",{"0":{"25":1}}],["cold",{"0":{"25":1}}],["column",{"0":{"25":1}}],["colors",{"0":{"25":1}}],["costs",{"0":{"22":1}}],["count",{"0":{"20":2,"43":1}}],["could",{"0":{"2":1,
"3":1,"5":1,"18":1,"22":1,"28":1,"47":1,"52":1,"53":1,"61":2,"68":1}}],["cortex",{"0":{"56":2}}],["corner",{"0":{"40":1,"56":2}}],["correct",{"0":{"34":1,"44":1,"45":1,"56":3,"57":4}}],["correctly",{"0":{"5":1,"56":2}}],["corresponding",{"0":{"18":1,"40":1,"61":1}}],["coredns",{"0":{"27":9,"56":2}}],["core",{"0":{"3":3,"13":1,"15":8,"16":2,"21":1,"23":1,"27":1,"28":1,"33":1,"53":1,"54":2,"56":28,"57":2,"59":1,"61":3,"62":2,"64":3,"65":1}}],["codeserver",{"0":{"44":1}}],["codeservers",{"0":{"23":1}}],["code",{"0":{"5":1,"24":1,"25":17,"34":4,"38":2,"40":6,"44":2,"45":5,"56":5,"58":3,"61":6,"62":1,"64":1,"68":2}}],["concurrently",{"0":{"61":2}}],["concurrency",{"0":{"61":2}}],["conceptualy",{"0":{"61":1}}],["concepts",{"0":{"21":1,"61":1}}],["convert",{"0":{"57":1}}],["converting",{"0":{"57":1}}],["convenient",{"0":{"41":1}}],["conventions",{"0":{"61":1}}],["convention",{"0":{"2":3,"22":1}}],["constructs",{"0":{"61":1}}],["constraints",{"0":{"24":1}}],["consent",{"0":{"52":1}}],["console",{"0":{"30":1,"53":1,"56":1}}],["consumer",{"0":{"7":1}}],["consideration",{"0":{"2":1}}],["condition=",{"0":{"43":3}}],["condition",{"0":{"2":1,"43":3}}],["connected",{"0":{"22":2}}],["connectivity",{"0":{"12":1}}],["connection",{"0":{"2":2,"12":2,"16":1,"18":1,"22":2,"42":3,"46":1}}],["connect",{"0":{"2":4,"5":2,"12":2,"18":2,"26":2,"57":1,"63":1}}],["continuing",{"0":{"47":1}}],["continuous",{"0":{"42":1}}],["continue",{"0":{"40":1}}],["control",{"0":{"15":1,"41":1,"47":1,"56":1,"61":1,"68":1}}],["controllers",{"0":{"61":2}}],["controller",{"0":{"2":1,"15":2,"61":5}}],["content",{"0":{"54":1,"69":1}}],["contenttype",{"0":{"13":1}}],["contents",{"0":{"4":1,"61":2}}],["context=itx",{"0":{"17":1}}],["context=fill",{"0":{"6":1}}],["context",{"0":{"4":1,"6":3,"8":3,"12":6,"15":2,"17":2,"20":1,"25":1,"26":3,"51":2,"56":3,"57":3,"61":1,"64":1}}],["contexts",{"0":{"4":1,"8":1,"12":1,"26":1,"56":2,"57":1}}],["contains",{"0":{"10":1,"12":1,"17":1,"29":1,"39":1}}],["contain",{"0":{"3":1,"57":2}}],["container=",{"0":{"29":2}}],["containerservice",{"0":{"56":1}}],["containers",{"0":{"27":1,"29":1,"32":1,"43":1,"56":1}}],["containers=",{"0":{"21":1}}],["container",{"0":{"2":1,"3":1,"29":2,"32":4,"33":2,"43":3,"65":1}}],["containing",{"0":{"2":1,"56":1}}],["conflunce",{"0":{"8":1}}],["confluence",{"0":{"1":1,"2":1,"15":1,"42":1}}],["confirm",{"0":{"34":1}}],["confirmed",{"0":{"2":1}}],["configmaps",{"0":{"61":1}}],["configmap",{"0":{"27":2,"61":2}}],["config2",{"0":{"26":1}}],["configs",{"0":{"2":1,"3":1,"56":1}}],["config",{"0":{"2":2,"3":5,"4":4,"6":3,"8":2,"10":2,"12":4,"14":1,"15":6,"17":4,"26":3,"27":1,"39":4,"41":1,"45":4,"47":1,"56":8,"57":7,"62":2,"66":2}}],["configuring",{"0":{"3":1,"16":1,"56":1}}],["configure",{"0":{"2":2,"3":1,"4":3,"5":4,"8":1,"14":3,"15":2,"18":1,"20":1,"22":1,"26":1,"32":2,"33":3,"35":1,"40":1,"42":1,"52":4,"53":1}}],["configured",{"0":{"0":1,"2":1,"5":1,"12":1,"15":1,"33":1,"40":1,"53":1}}],["configurations",{"0":{"1":2,"62":1}}],["configuration",{"0":{"0":3,"2":6,"3":11,"5":2,"9":2,"10":4,"15":8,"16":1,"17":3,"32":4,"33":3,"39":4,"42":1,"47":1,"52":1,"56":4,"57":1,"61":3,"62":7}}],["comes",{"0":{"56":1}}],["come",{"0":{"47":1,"49":1,"56":1}}],["combined",{"0":{"39":1,"47":3}}],["comx",{"0":{"7":3}}],["commentary",{"0":{"56":1}}],["comments",{"0":{"3":2}}],["common",{"0":{"40":1,"56":1,"62":1}}],["commited",{"0":{"39":1}}],["committed",{"0":{"34":1}}],["commits",{"0":{"34":1}}],["commiting",{"0":{"15":1}}],["commit",{"0":{"3":2,"6":1,"16":2,"28":1,"34":1,"39":5,"42":1,"57":1}}],["commands
",{"0":{"20":1,"34":1,"56":2,"57":3,"62":1,"63":2,"64":2,"65":1}}],["command",{"0":{"2":1,"4":1,"6":1,"18":1,"26":1,"28":1,"30":5,"34":2,"55":1,"56":4,"57":2,"59":1,"63":3,"65":1}}],["component",{"0":{"67":1}}],["components",{"0":{"4":1,"14":1,"15":1,"62":2}}],["compose",{"0":{"61":1,"65":4}}],["complex",{"0":{"56":1}}],["complexity",{"0":{"56":1}}],["completed",{"0":{"64":1}}],["completes",{"0":{"22":1}}],["completely",{"0":{"22":1}}],["complete",{"0":{"2":1,"22":1,"32":4,"48":1,"52":1,"57":3}}],["compllicated",{"0":{"56":1}}],["complain",{"0":{"56":1}}],["comparison",{"0":{"56":1}}],["compare",{"0":{"34":1,"56":2,"57":1}}],["compatible",{"0":{"28":4}}],["compile",{"0":{"30":1}}],["comprehensive",{"0":{"22":1}}],["compute",{"0":{"45":1,"61":1}}],["computed",{"0":{"15":1}}],["compute+storage",{"0":{"0":1}}],["com",{"0":{"0":9,"1":4,"2":12,"3":2,"4":3,"6":2,"7":4,"8":1,"9":3,"11":1,"12":17,"14":6,"15":9,"17":2,"18":2,"20":6,"22":2,"24":1,"25":13,"26":4,"27":4,"30":1,"33":1,"35":1,"39":2,"41":1,"45":1,"46":3,"47":1,"50":1,"51":1,"56":12,"57":24,"59":3,"62":2,"65":1,"66":2,"68":2}}],["pkcs12",{"0":{"57":6}}],["pkcs7",{"0":{"57":2}}],["pfx",{"0":{"57":12}}],["pzeembwga1uechmvbwtjzxj0igrldmvsb3btzw50ienbmt4wpaydvqqlddvzc2fz",{"0":{"57":1}}],["physical",{"0":{"56":1}}],["pdbs",{"0":{"56":7}}],["pdb",{"0":{"56":29}}],["pc",{"0":{"48":1,"56":1}}],["pce",{"0":{"2":1,"7":3}}],["pvc",{"0":{"45":25,"49":4}}],["pv",{"0":{"45":18,"49":2}}],["p",{"0":{"39":1,"45":1,"56":18}}],["ps",{"0":{"38":1,"65":1}}],["psql",{"0":{"18":1}}],["p=",{"0":{"27":1}}],["pg",{"0":{"18":2}}],["png",{"0":{"13":1}}],["pypi",{"0":{"58":1}}],["pypa",{"0":{"37":1}}],["pygraphviz",{"0":{"13":1}}],["py",{"0":{"6":2,"13":2,"15":7,"17":2,"22":2,"25":1,"28":1,"34":3,"37":2,"39":5,"42":1,"47":4,"49":2,"51":2,"54":2,"56":2,"57":2,"59":4,"62":1,"63":28}}],["python3",{"0":{"15":1,"37":1,"46":2,"56":3,"63":28}}],["python",{"0":{"2":1,"6":1,"12":1,"17":1,"25":4,"28":1,"30":4,"37":1,"38":2,"40":3,"46":2,"56":3,"62":1,"64":2}}],["plus",{"0":{"54":1}}],["plugins",{"0":{"5":2}}],["place",{"0":{"56":1}}],["placed",{"0":{"42":1}}],["places",{"0":{"3":1,"25":1}}],["plane",{"0":{"56":1,"68":1}}],["plane=controller",{"0":{"15":1}}],["plan",{"0":{"22":5,"56":2}}],["platform",{"0":{"7":1}}],["please",{"0":{"2":9,"3":1,"7":1,"22":2,"57":1}}],["pem",{"0":{"57":5}}],["pending",{"0":{"3":1,"61":3}}],["people",{"0":{"3":1,"57":1}}],["persistentvolumeclaim",{"0":{"45":1}}],["persistentvolume",{"0":{"45":1}}],["persistentvolumereclaimpolicy",{"0":{"45":2}}],["persistent",{"0":{"45":11,"49":2}}],["personal",{"0":{"1":2}}],["period",{"0":{"22":3,"61":1}}],["permission",{"0":{"9":1,"32":1,"56":1}}],["permissions",{"0":{"4":2,"9":3,"15":1,"20":1,"22":2,"31":2,"52":8}}],["perform",{"0":{"4":1,"29":4,"56":1}}],["performance",{"0":{"0":1,"28":1,"46":2}}],["per",{"0":{"2":2,"7":1,"21":1,"22":1,"24":3,"47":1,"62":1}}],["pidpressure",{"0":{"43":1}}],["pii",{"0":{"42":2}}],["pickle",{"0":{"63":2}}],["pick",{"0":{"34":3,"44":2,"56":6}}],["pilot",{"0":{"25":1}}],["pip",{"0":{"37":4,"46":1,"50":3,"65":1}}],["pipx",{"0":{"12":1}}],["pip3",{"0":{"6":1,"13":1,"17":1,"56":1}}],["pipeline",{"0":{"5":1,"55":1}}],["pipelines",{"0":{"2":2,"3":1,"17":1,"55":1,"65":1}}],["pingid",{"0":{"2":1}}],["ping",{"0":{"2":3,"3":3,"16":2}}],["put",{"0":{"44":1,"56":1}}],["putobject",{"0":{"2":2,"9":1,"32":1,"53":1}}],["pushing",{"0":{"59":1}}],["pushed",{"0":{"28":2,"59":2}}],["push",{"0":{"3":2,"11":1,"15":1,"16":1,"34":2,"39":3,"54":1,"57":3,"59":2}}],["public",{"0":{"2":1,"12":1}}],["pullin
g",{"0":{"44":1}}],["pull",{"0":{"2":1,"3":1,"6":1,"8":1,"15":2,"16":2,"39":2,"56":1,"58":1}}],["purpose",{"0":{"0":1,"47":2,"56":2,"57":2}}],["panic",{"0":{"61":1}}],["panel",{"0":{"22":3,"44":1,"56":4}}],["package",{"0":{"46":1,"50":1,"61":1}}],["packages",{"0":{"46":2,"56":1,"63":28}}],["paste",{"0":{"41":2,"56":2}}],["past",{"0":{"22":1,"56":1}}],["pass",{"0":{"3":1,"16":1,"61":1}}],["password=",{"0":{"8":1,"14":1}}],["passwords",{"0":{"2":1,"57":1}}],["password",{"0":{"0":1,"1":1,"2":7,"3":1,"4":1,"8":2,"9":2,"11":2,"12":1,"14":1,"16":1,"18":2,"33":1,"57":7,"66":1}}],["pay",{"0":{"22":1}}],["payment",{"0":{"22":2}}],["payload",{"0":{"22":1,"54":1}}],["parser",{"0":{"63":1}}],["parameters",{"0":{"29":1}}],["params",{"0":{"3":3,"6":2,"33":2,"56":1,"62":5}}],["particularly",{"0":{"57":1}}],["partial",{"0":{"13":1}}],["parts",{"0":{"57":1,"61":1}}],["party",{"0":{"7":1}}],["part",{"0":{"2":1,"25":1,"47":1}}],["patch",{"0":{"27":2,"28":6,"45":1,"56":20,"67":3}}],["paths",{"0":{"57":1}}],["path",{"0":{"3":1,"27":2,"45":2,"59":1,"68":1}}],["pattern",{"0":{"2":1,"27":1}}],["page",{"0":{"2":3,"5":1,"15":1,"22":2,"40":5,"52":2}}],["pages",{"0":{"1":1}}],["port",{"0":{"68":1}}],["portal",{"0":{"12":1,"52":1,"56":1}}],["potential",{"0":{"61":1}}],["pomerium",{"0":{"57":2,"68":6}}],["populated",{"0":{"40":1}}],["popup",{"0":{"7":1}}],["power",{"0":{"25":2}}],["possibility",{"0":{"64":1}}],["possible",{"0":{"22":1,"61":1}}],["post",{"0":{"5":2,"46":1,"54":1,"56":1}}],["postgres",{"0":{"2":6,"3":2,"9":2,"18":7}}],["postgresql",{"0":{"0":1,"49":2}}],["policies",{"0":{"14":1}}],["policy",{"0":{"2":7,"45":4,"53":1}}],["pointers",{"0":{"61":1}}],["points",{"0":{"24":1,"61":1}}],["point",{"0":{"22":1,"34":1,"39":1,"56":1}}],["pointtositeconfiguration",{"0":{"12":1}}],["pointing",{"0":{"2":1,"4":1}}],["pod=",{"0":{"29":1}}],["pod=~",{"0":{"29":1,"43":1}}],["pods=$",{"0":{"38":1}}],["pods",{"0":{"8":1,"15":4,"20":1,"24":6,"38":2,"43":3,"49":2,"56":4}}],["pod",{"0":{"8":1,"13":1,"15":3,"21":2,"23":2,"24":4,"29":4,"38":1,"43":7,"49":1,"54":1,"56":4,"63":1}}],["pools",{"0":{"0":1,"56":1}}],["p10",{"0":{"0":1}}],["practice",{"0":{"34":1}}],["prd001",{"0":{"29":3,"43":1}}],["prd",{"0":{"4":3,"9":2}}],["primitive",{"0":{"61":1}}],["prices",{"0":{"22":1}}],["pricing",{"0":{"22":1}}],["priority",{"0":{"34":1}}],["prior",{"0":{"15":1,"17":1,"56":2}}],["privileges",{"0":{"14":1,"46":1}}],["privilege",{"0":{"4":1}}],["private",{"0":{"2":1,"8":1,"35":2,"41":2,"42":2,"57":4}}],["principal",{"0":{"2":1}}],["print",{"0":{"2":1,"14":2,"30":1,"38":1,"56":1,"57":2}}],["pr",{"0":{"2":1,"3":1,"42":1,"59":1}}],["precise",{"0":{"61":1}}],["preconfigured",{"0":{"40":1}}],["prevent",{"0":{"56":2}}],["previous",{"0":{"46":1,"56":1}}],["preview",{"0":{"25":1}}],["pressure",{"0":{"43":1}}],["present",{"0":{"34":2}}],["prefixing",{"0":{"59":1}}],["prefix",{"0":{"24":1,"32":3,"43":1}}],["prefer",{"0":{"56":1}}],["preferring",{"0":{"22":1}}],["preferences",{"0":{"26":1}}],["preference",{"0":{"0":1}}],["prepare",{"0":{"40":1,"56":1,"57":1}}],["prepay",{"0":{"22":1}}],["prepaid",{"0":{"22":1}}],["prerequisites",{"0":{"6":1,"15":2,"17":1}}],["pre",{"0":{"5":1,"42":1,"59":4}}],["premium",{"0":{"0":1}}],["proto",{"0":{"68":1}}],["protocol",{"0":{"11":1,"63":1}}],["problems",{"0":{"57":1,"61":1}}],["problem",{"0":{"56":1,"68":1}}],["probably",{"0":{"2":1,"12":1,"56":1,"57":1}}],["procedure",{"0":{"56":1}}],["procedures",{"0":{"56":1}}],["processes",{"0":{"38":1,"65":1}}],["process",{"0":{"8":1,"22":3,"34":1,"40":1,"47":1,"56":3,"57":1,"61":3,
"63":1,"67":1}}],["proc",{"0":{"46":3}}],["prometheus",{"0":{"43":1,"54":1,"56":2}}],["produced",{"0":{"47":1}}],["product",{"0":{"22":2}}],["products",{"0":{"22":7}}],["production",{"0":{"0":1,"1":1,"2":4,"4":1,"30":1,"42":1,"59":1}}],["prod",{"0":{"42":2}}],["properly",{"0":{"39":1}}],["properties",{"0":{"32":1}}],["program",{"0":{"61":1}}],["programmatic",{"0":{"30":1}}],["programatically",{"0":{"30":1}}],["proxy",{"0":{"14":4}}],["pro001",{"0":{"2":1}}],["profiles",{"0":{"12":1,"40":3,"42":1}}],["profile",{"0":{"2":1,"12":2,"40":12,"56":15}}],["project",{"0":{"2":2,"3":1,"5":6,"15":13,"23":1,"42":1,"62":1,"64":1}}],["projects",{"0":{"1":1,"2":8,"3":1,"15":2,"23":5,"56":1}}],["providing",{"0":{"52":1}}],["provides",{"0":{"22":1}}],["providers",{"0":{"12":1,"32":1,"33":1,"45":1}}],["provider",{"0":{"2":1,"26":1,"33":5,"56":1}}],["provided",{"0":{"0":1,"1":2,"47":1,"59":1}}],["provisioner",{"0":{"3":2,"18":1}}],["provisioned",{"0":{"2":1}}],["tls",{"0":{"57":6}}],["tld",{"0":{"0":1}}],["tmux",{"0":{"46":2}}],["tsv",{"0":{"45":1}}],["turning",{"0":{"51":2}}],["turn",{"0":{"23":1,"49":2}}],["tutorial",{"0":{"14":1}}],["traceback",{"0":{"63":1}}],["track",{"0":{"61":1}}],["transport",{"0":{"46":1}}],["transition",{"0":{"22":1}}],["transfers",{"0":{"22":1}}],["transfer",{"0":{"22":2,"41":1}}],["transform",{"0":{"12":2,"56":1}}],["treat",{"0":{"61":1}}],["tree",{"0":{"34":1,"56":3,"57":1}}],["try",{"0":{"56":2,"57":2,"59":1}}],["triggerer",{"0":{"63":1}}],["triggered",{"0":{"40":1}}],["trigger",{"0":{"55":1}}],["triggering",{"0":{"28":1}}],["trial",{"0":{"22":11}}],["true",{"0":{"8":1,"18":3,"22":2,"43":3}}],["trusted",{"0":{"2":1}}],["tgz",{"0":{"11":2}}],["two",{"0":{"8":1,"15":1,"32":3,"56":2,"57":2,"61":1}}],["txt",{"0":{"6":1,"17":1,"37":1,"50":1,"56":1,"58":1}}],["t",{"0":{"2":1,"3":1,"5":1,"22":2,"23":1,"34":4,"40":1,"44":2,"56":4,"57":4,"61":3}}],["targetresourcegroup",{"0":{"45":1}}],["targeted",{"0":{"39":1,"58":1}}],["targetted",{"0":{"34":1}}],["tar",{"0":{"26":4}}],["taxes",{"0":{"22":1}}],["tally",{"0":{"22":2,"23":2}}],["tag",{"0":{"21":1,"34":6,"56":3}}],["tags",{"0":{"14":1,"28":2,"34":3,"56":1}}],["tasks",{"0":{"23":15}}],["task",{"0":{"21":1,"23":3,"29":2,"61":1,"63":5}}],["tap",{"0":{"14":3}}],["tail",{"0":{"8":1}}],["table",{"0":{"49":1}}],["tab",{"0":{"2":1,"5":3,"32":3}}],["taqy",{"0":{"2":2,"5":1,"7":5,"8":5,"65":1,"66":2}}],["take",{"0":{"2":1,"34":1,"40":1,"49":1,"56":5,"57":1,"61":1}}],["typha",{"0":{"56":2}}],["typically",{"0":{"2":1,"5":1,"22":1,"40":1,"57":1,"68":1}}],["type=",{"0":{"27":1}}],["types",{"0":{"22":2,"61":2}}],["type",{"0":{"0":2,"2":2,"4":1,"7":2,"9":1,"22":2,"24":5,"54":1}}],["tee",{"0":{"46":2}}],["text",{"0":{"35":1,"40":1,"57":3}}],["tenant",{"0":{"26":1,"52":5,"56":2}}],["temporary",{"0":{"57":1}}],["temporarily",{"0":{"56":1}}],["temporal",{"0":{"18":2}}],["template",{"0":{"4":2,"7":1,"27":2,"42":1,"56":4}}],["templates",{"0":{"2":2}}],["termination",{"0":{"61":1}}],["terminated",{"0":{"43":1}}],["terminal",{"0":{"4":2,"56":1,"65":1}}],["terraform",{"0":{"2":3,"17":1}}],["team",{"0":{"2":6,"7":2,"51":1,"53":1,"62":1}}],["tests",{"0":{"67":1}}],["test",{"0":{"1":1,"2":4,"6":1,"17":1,"22":6,"27":1,"54":3,"59":1}}],["tis",{"0":{"63":4}}],["ti",{"0":{"27":1,"38":1,"65":1}}],["timkmecl",{"0":{"25":1}}],["timeout",{"0":{"68":2}}],["timestamp",{"0":{"28":1}}],["times",{"0":{"23":1,"29":1}}],["time",{"0":{"2":1,"6":1,"25":1,"28":1,"29":1,"44":1,"54":3,"56":2,"61":1,"68":2}}],["ticket",{"0":{"2":1,"59":1}}],["title=how+to+request+access+to+bitbucket+",{
"0":{"1":1}}],["tier",{"0":{"0":1}}],["thusly",{"0":{"47":1,"56":2,"57":4}}],["than",{"0":{"24":1,"34":1,"61":1,"63":1}}],["that",{"0":{"0":1,"2":8,"3":2,"4":2,"5":2,"6":1,"8":1,"10":1,"12":2,"14":1,"15":3,"16":1,"18":3,"21":1,"22":4,"24":1,"25":2,"28":6,"29":1,"31":1,"34":8,"39":2,"40":3,"41":3,"42":3,"44":2,"45":3,"46":1,"47":4,"55":1,"56":19,"57":9,"58":1,"59":1,"61":14,"62":1,"64":1,"65":1,"66":1,"67":1,"68":1}}],["thinking",{"0":{"41":1}}],["think",{"0":{"25":1,"57":1,"61":1}}],["thing",{"0":{"23":1,"62":1}}],["things",{"0":{"12":1,"25":1,"49":1,"56":1,"62":1}}],["third",{"0":{"7":1}}],["this",{"0":{"2":18,"3":3,"4":1,"8":1,"10":1,"11":1,"12":2,"15":5,"17":3,"18":1,"20":1,"22":2,"25":12,"27":2,"34":3,"44":1,"45":3,"47":3,"48":1,"49":2,"52":5,"56":24,"57":15,"58":1,"59":2,"61":10,"62":2,"67":2,"68":3}}],["those",{"0":{"5":1,"8":1,"16":1,"22":1,"47":1,"56":1,"57":2,"62":1,"68":2}}],["three",{"0":{"22":1,"29":1,"33":1,"49":1}}],["threshold",{"0":{"2":1,"9":1}}],["through",{"0":{"2":1,"15":1,"17":1,"22":2,"34":1,"56":1,"57":1}}],["them",{"0":{"15":3,"22":4,"28":1,"40":1,"47":1,"49":1,"50":1,"56":3,"57":2,"59":1,"61":5}}],["their",{"0":{"8":1,"22":1,"44":1,"47":1,"61":4,"62":1,"68":1}}],["then",{"0":{"6":1,"10":1,"20":1,"22":3,"27":2,"32":2,"34":2,"35":3,"37":1,"39":1,"47":3,"52":2,"56":5,"57":4,"63":1,"66":1}}],["they",{"0":{"2":1,"3":1,"10":2,"17":2,"22":3,"28":2,"40":1,"49":1,"52":1,"56":2,"57":2,"61":2}}],["these",{"0":{"2":2,"6":1,"7":1,"17":2,"22":3,"25":1,"44":1,"49":4,"56":1,"57":6,"61":2,"68":1}}],["therefore",{"0":{"29":1,"45":1}}],["there",{"0":{"1":1,"2":2,"3":1,"7":1,"15":1,"22":1,"23":1,"29":1,"30":1,"33":1,"34":1,"39":2,"47":1,"56":5,"57":3,"61":2}}],["the",{"0":{"0":4,"1":1,"2":62,"3":25,"4":14,"5":7,"6":12,"7":12,"8":6,"9":1,"10":6,"11":9,"12":13,"14":7,"15":41,"16":5,"17":14,"18":7,"20":4,"22":78,"23":10,"24":6,"25":5,"27":7,"28":7,"29":14,"30":6,"31":1,"32":8,"33":5,"34":16,"35":3,"37":1,"39":17,"40":29,"41":6,"44":5,"45":40,"46":11,"47":21,"49":8,"51":1,"52":10,"53":7,"54":2,"55":4,"56":124,"57":83,"58":2,"59":7,"61":60,"62":6,"63":1,"64":3,"65":1,"66":2,"67":9,"68":5}}],["touching",{"0":{"61":1}}],["too",{"0":{"61":1}}],["tool",{"0":{"7":1,"42":2}}],["tools",{"0":{"5":1,"7":1,"56":2,"58":1}}],["tos",{"0":{"56":1,"62":1}}],["today",{"0":{"51":1}}],["todo",{"0":{"3":4,"16":1,"57":1}}],["topic",{"0":{"34":1}}],["top",{"0":{"23":1,"40":1}}],["total",{"0":{"22":1,"23":1,"43":3}}],["tokens",{"0":{"23":1,"52":1}}],["tokenproxy",{"0":{"13":1}}],["token",{"0":{"2":1,"14":2,"26":1,"35":1,"52":3}}],["to",{"0":{"0":4,"1":4,"2":51,"3":16,"4":15,"5":9,"6":6,"7":15,"8":6,"10":2,"11":2,"12":7,"13":1,"14":4,"15":23,"16":3,"17":2,"18":7,"20":8,"22":33,"23":4,"25":4,"26":4,"27":5,"28":7,"29":13,"30":3,"31":3,"32":8,"33":5,"34":24,"35":3,"39":10,"40":15,"41":6,"42":4,"44":3,"45":12,"46":1,"47":12,"48":3,"49":4,"50":1,"51":1,"52":10,"53":1,"54":1,"55":2,"56":69,"57":22,"58":1,"59":8,"61":32,"62":8,"63":3,"64":5,"65":2,"66":3,"67":7,"68":2}}],["src",{"0":{"58":1,"59":1,"61":1,"62":2}}],["skip",{"0":{"57":1,"67":1}}],["snf1w==",{"0":{"57":1}}],["snowflake",{"0":{"2":1,"7":2,"25":4,"42":4,"65":4}}],["switch",{"0":{"56":1}}],["sftp",{"0":{"27":4}}],["sleep",{"0":{"65":1}}],["sleistner",{"0":{"25":1}}],["slightly",{"0":{"57":1}}],["slack",{"0":{"23":1,"56":1}}],["slug",{"0":{"2":2,"22":2,"29":4,"49":1}}],["sqltypes",{"0":{"63":1}}],["sqltools",{"0":{"25":1}}],["sqlalchemy",{"0":{"63":9}}],["sqlfluff",{"0":{"25":2}}],["sql",{"0":{"18":2,"25":1,"42":1,"63":1}}],["symbol",{"0":{"57":1}}],["symbolic
",{"0":{"46":1}}],["synchronization",{"0":{"29":1}}],["sync",{"0":{"8":2,"9":2,"22":2,"23":2,"29":13,"49":3,"61":1}}],["sys",{"0":{"46":3,"63":1}}],["sysadmin",{"0":{"7":3}}],["systems",{"0":{"15":1}}],["system32",{"0":{"4":1}}],["system",{"0":{"2":2,"5":2,"8":2,"14":4,"15":4,"21":1,"22":2,"27":3,"46":2,"56":11,"64":1}}],["scaffolded",{"0":{"61":1}}],["scope",{"0":{"56":2}}],["screen",{"0":{"56":1}}],["scritps",{"0":{"46":1}}],["scripts",{"0":{"6":2,"10":2,"12":1,"15":1,"17":2,"39":2,"54":1,"56":1,"62":2}}],["script",{"0":{"4":1,"15":1,"58":1}}],["scroll",{"0":{"22":1}}],["schedulable",{"0":{"63":1}}],["scheduling",{"0":{"63":4}}],["schedule",{"0":{"63":2}}],["schedules",{"0":{"61":1}}],["scheduler",{"0":{"29":2,"56":2,"61":1,"63":12}}],["scheduled",{"0":{"2":1}}],["schema",{"0":{"18":1}}],["scm",{"0":{"2":7,"3":1,"4":1,"5":2,"6":1,"15":1,"17":1}}],["s3",{"0":{"2":30,"9":13,"25":13,"29":6,"32":9,"33":2,"53":15}}],["shutting",{"0":{"56":2}}],["shellenv",{"0":{"56":2}}],["shell",{"0":{"54":1,"56":1}}],["sh",{"0":{"13":1,"24":3,"35":4,"46":1,"49":1,"54":1}}],["sha",{"0":{"28":1}}],["sharing",{"0":{"23":1}}],["share",{"0":{"2":5,"53":1}}],["sharepoint",{"0":{"1":1}}],["sha256withrsaencryption",{"0":{"57":4}}],["sha2",{"0":{"4":2}}],["shown",{"0":{"23":1}}],["show",{"0":{"11":1,"25":1,"34":1,"45":1,"49":2,"56":1,"57":1,"65":1}}],["should",{"0":{"2":1,"3":2,"4":1,"15":2,"16":1,"22":2,"28":1,"30":1,"34":1,"41":1,"45":1,"49":2,"56":5,"57":16,"61":5}}],["shortcut",{"0":{"4":2}}],["short",{"0":{"2":1,"25":1,"61":1}}],["safari",{"0":{"68":2}}],["safe",{"0":{"52":1,"57":1}}],["say",{"0":{"34":1,"57":1}}],["save",{"0":{"22":1,"31":1,"32":2,"40":1,"44":1,"49":1,"56":2,"68":2}}],["sales",{"0":{"22":1,"29":1}}],["sa",{"0":{"5":1,"7":1,"42":2}}],["sample",{"0":{"42":1}}],["samuelcolvin",{"0":{"25":1}}],["saml",{"0":{"2":1}}],["same",{"0":{"0":1,"1":1,"2":4,"3":1,"22":1,"28":1,"40":1,"45":1,"47":1,"56":4}}],["sandbox",{"0":{"2":1}}],["svc",{"0":{"2":1,"6":1,"18":1,"42":6,"46":1,"54":1}}],["side",{"0":{"64":1}}],["site",{"0":{"63":28}}],["simplify",{"0":{"61":1}}],["simply",{"0":{"56":1,"61":1}}],["simpler",{"0":{"65":1}}],["simple",{"0":{"44":2,"56":1}}],["similar",{"0":{"2":1,"56":2}}],["silent",{"0":{"30":1}}],["sigkilled",{"0":{"61":1}}],["sigterm",{"0":{"61":1}}],["sigs",{"0":{"14":1,"26":1,"46":1}}],["signal",{"0":{"61":1}}],["signaled",{"0":{"61":1}}],["signature",{"0":{"57":4}}],["signatures",{"0":{"2":1}}],["signing",{"0":{"46":1}}],["signup",{"0":{"22":1}}],["sign",{"0":{"2":1}}],["singleton",{"0":{"61":1}}],["single",{"0":{"2":2,"41":1,"52":1,"57":6,"61":1}}],["since",{"0":{"2":1,"25":1}}],["size",{"0":{"0":5,"9":3,"20":1,"43":1,"54":1,"68":1}}],["solution",{"0":{"63":1,"64":1,"68":1}}],["solve",{"0":{"62":1}}],["sort",{"0":{"23":1}}],["soon",{"0":{"22":1,"57":1}}],["source",{"0":{"24":2,"47":1,"56":1,"61":1,"62":1}}],["sources",{"0":{"5":1,"39":1,"46":2}}],["sourcecode",{"0":{"1":1,"2":8,"3":2,"4":1,"6":1,"15":2,"17":1,"39":1}}],["software",{"0":{"4":1,"46":1}}],["something",{"0":{"62":1}}],["sometimes",{"0":{"22":1,"30":1,"44":1,"56":1,"59":1,"65":1,"66":1,"67":1}}],["somewhat",{"0":{"61":1}}],["someone",{"0":{"15":1}}],["some",{"0":{"2":4,"24":1,"27":1,"29":1,"52":1,"54":1,"56":2,"57":1,"61":1,"67":1}}],["so",{"0":{"2":7,"3":3,"5":1,"7":1,"20":1,"23":1,"44":1,"52":1,"56":7,"57":1,"59":2,"61":3,"67":1}}],["s",{"0":{"2":6,"3":3,"6":1,"7":1,"8":1,"12":2,"15":3,"16":1,"20":1,"21":1,"22":4,"23":3,"26":1,"28":1,"34":1,"35":1,"38":1,"39":3,"40":1,"46":3,"47":1,"53":1,"56":18,"57":6,"61":8,"62":1,"64":2
}}],["serialized",{"0":{"63":1}}],["serially",{"0":{"61":1}}],["serial",{"0":{"57":4}}],["series",{"0":{"47":2}}],["service=datacoves",{"0":{"14":1}}],["serviceaccount",{"0":{"2":1}}],["service",{"0":{"1":1,"2":9,"3":2,"5":2,"7":1,"8":6,"14":1,"18":1,"22":1,"26":1,"42":2,"45":1,"46":1,"59":1,"68":1}}],["services",{"0":{"0":1,"2":4,"3":2,"14":2,"15":2,"18":1,"22":5,"32":1,"61":1,"65":1}}],["servers",{"0":{"27":1,"28":1,"44":1}}],["server=",{"0":{"14":1}}],["server=jnj",{"0":{"8":1}}],["server",{"0":{"0":1,"4":1,"5":2,"11":5,"14":3,"15":3,"18":1,"22":1,"24":1,"26":1,"27":2,"35":3,"38":2,"40":6,"41":1,"44":1,"45":5,"56":8,"57":3,"58":3,"61":1}}],["self",{"0":{"46":1,"63":12}}],["selecting",{"0":{"52":1}}],["selects",{"0":{"22":1}}],["selected",{"0":{"5":1,"52":1}}],["select",{"0":{"5":1,"8":1,"22":2,"29":3,"32":8,"34":2,"52":2,"56":1,"59":2}}],["several",{"0":{"28":1,"57":2}}],["semantic",{"0":{"28":1}}],["seat",{"0":{"22":1}}],["seats",{"0":{"22":2}}],["search",{"0":{"7":2,"23":1,"29":1,"31":1}}],["sessions",{"0":{"68":1}}],["session=session",{"0":{"63":2}}],["session",{"0":{"13":1,"46":1,"63":6,"68":2}}],["sets",{"0":{"22":3,"40":2,"56":3}}],["setting",{"0":{"6":1,"22":1,"56":1,"57":1,"61":1}}],["settings",{"0":{"5":2,"44":1}}],["set",{"0":{"5":1,"6":1,"11":1,"12":2,"14":1,"17":1,"18":2,"20":1,"22":6,"26":1,"32":1,"39":1,"40":9,"42":1,"51":3,"52":1,"53":1,"56":16,"57":1,"59":2,"61":4,"67":2}}],["setupwithmanager",{"0":{"61":1}}],["setup",{"0":{"2":1,"3":2,"6":2,"12":1,"15":7,"17":1,"22":8,"56":2,"57":2,"62":1}}],["sentry",{"0":{"56":3}}],["sensitive",{"0":{"3":1,"16":1,"57":1}}],["sends",{"0":{"61":1}}],["send",{"0":{"2":1,"23":1,"61":1}}],["separate",{"0":{"3":1,"10":1,"17":1,"61":1,"62":1}}],["secert",{"0":{"57":1}}],["secure",{"0":{"42":1}}],["security",{"0":{"2":4,"50":1,"52":1}}],["seconds",{"0":{"23":2,"43":1}}],["section",{"0":{"12":1,"56":2,"57":4}}],["sections",{"0":{"2":1}}],["secret=",{"0":{"8":1}}],["secret",{"0":{"2":1,"3":7,"6":1,"8":9,"14":6,"15":6,"16":6,"17":1,"33":4,"39":2,"41":5,"52":4,"53":1,"57":16,"61":3,"62":4}}],["secrets",{"0":{"2":2,"3":3,"4":1,"5":1,"6":1,"8":4,"15":2,"16":4,"17":1,"39":3,"49":1,"52":1,"57":9,"61":1,"62":4}}],["seen",{"0":{"61":1}}],["seems",{"0":{"57":1}}],["seem",{"0":{"24":1}}],["see",{"0":{"1":1,"3":2,"10":3,"11":1,"15":1,"21":1,"22":2,"23":3,"30":1,"31":1,"54":1,"56":6,"57":6}}],["spawns",{"0":{"61":1}}],["space",{"0":{"56":1}}],["spacekey=ahrx",{"0":{"1":1}}],["spell",{"0":{"25":2}}],["spelufo",{"0":{"15":1}}],["spec",{"0":{"27":4,"45":3,"56":18,"61":1}}],["spec=k8s",{"0":{"21":1}}],["specifies",{"0":{"61":1}}],["specified",{"0":{"20":1,"22":1,"46":2}}],["specific",{"0":{"1":1,"7":1,"28":1,"40":1,"62":1}}],["specifying",{"0":{"22":1}}],["specify",{"0":{"2":1,"3":1,"59":1,"61":1}}],["special",{"0":{"2":2,"56":4,"57":1}}],["sudirectories",{"0":{"62":1}}],["sudo",{"0":{"4":1,"46":12,"56":5}}],["suspect",{"0":{"57":1}}],["suffix",{"0":{"33":1}}],["success=false",{"0":{"64":1}}],["successful",{"0":{"29":1}}],["succeed",{"0":{"61":1}}],["such",{"0":{"0":2,"2":2,"3":1,"12":1,"22":1,"28":1,"40":1,"49":2,"57":2,"61":1,"67":1}}],["sum",{"0":{"24":1,"43":7}}],["summary",{"0":{"0":1,"2":1,"9":1}}],["sure",{"0":{"5":1,"7":1,"15":1,"34":4,"47":2,"49":1,"52":1,"56":9,"57":6}}],["su",{"0":{"4":1,"56":3}}],["supbrocesses",{"0":{"61":1}}],["superset",{"0":{"3":2,"12":1,"18":1,"22":2,"28":3}}],["supported",{"0":{"56":1}}],["supports",{"0":{"2":1}}],["support",{"0":{"0":1,"4":2,"7":3,"12":3,"25":2,"51":2,"62":1,"66":1}}],["suggested",{"0":{"40":1}}],["suggest",{
"0":{"2":1,"53":1}}],["subtle",{"0":{"61":1}}],["subprocesses",{"0":{"61":3}}],["subjectaltname",{"0":{"57":1}}],["subject=c=us",{"0":{"57":1}}],["subject",{"0":{"57":9}}],["subcription",{"0":{"45":3}}],["subdirectories",{"0":{"62":1}}],["subdirectory",{"0":{"15":1,"62":1}}],["subdomains",{"0":{"12":1}}],["subdomain",{"0":{"2":2}}],["subsection",{"0":{"57":1}}],["subsequent",{"0":{"57":1}}],["subscribing",{"0":{"22":1}}],["subscription",{"0":{"12":2,"20":1,"22":14,"32":1,"56":2}}],["subscriptions",{"0":{"12":1,"22":1,"45":3}}],["substitute",{"0":{"2":1}}],["submodule",{"0":{"3":4,"6":2,"10":1,"17":2,"39":2}}],["submodules",{"0":{"3":2,"10":1,"17":1,"62":1}}],["submit",{"0":{"2":2,"3":1,"4":1}}],["sub",{"0":{"2":1,"57":1}}],["ssassi",{"0":{"4":1}}],["ssh",{"0":{"4":6,"6":1,"26":2,"39":2,"44":1,"46":1,"56":3}}],["sso",{"0":{"2":2}}],["ssl",{"0":{"0":1,"2":2,"15":1,"35":1,"57":2}}],["ssd",{"0":{"0":1}}],["stuck",{"0":{"56":1}}],["structs",{"0":{"61":1}}],["struct",{"0":{"61":1}}],["structure",{"0":{"39":1,"42":1}}],["str",{"0":{"54":1}}],["strangely",{"0":{"49":1}}],["stream",{"0":{"54":1}}],["streams",{"0":{"54":1}}],["streamsets`",{"0":{"29":1}}],["streetsidesoftware",{"0":{"25":1}}],["strip",{"0":{"57":1}}],["stripe",{"0":{"22":29}}],["strings",{"0":{"30":1}}],["stringlike",{"0":{"2":1}}],["still",{"0":{"2":1,"61":1}}],["sts",{"0":{"2":2}}],["stephen",{"0":{"34":1}}],["steps",{"0":{"11":1,"15":1,"18":1,"45":1,"49":1,"56":5,"57":1}}],["step",{"0":{"2":2,"45":1,"46":3,"56":3,"57":5}}],["stockunpickler",{"0":{"63":1}}],["stop",{"0":{"2":1,"3":1,"23":1,"47":1,"56":1}}],["stored",{"0":{"15":1,"43":2,"61":1}}],["stores",{"0":{"5":1}}],["store",{"0":{"2":2,"24":1,"56":1}}],["storageclassname",{"0":{"45":2}}],["storage",{"0":{"0":3,"2":4,"9":4,"32":8,"33":4,"45":2,"53":1}}],["stable",{"0":{"21":1,"46":2}}],["stack",{"0":{"9":1,"54":2,"57":1}}],["starting",{"0":{"61":1}}],["started",{"0":{"23":1}}],["start",{"0":{"7":1,"22":2,"40":1,"44":2,"56":1,"57":1,"61":1}}],["star",{"0":{"4":1}}],["states",{"0":{"63":1}}],["state=state",{"0":{"63":1}}],["state",{"0":{"61":3,"63":2}}],["stateless",{"0":{"61":1}}],["statement",{"0":{"2":3,"9":1,"32":1,"47":1,"53":1,"56":1,"57":1}}],["stats",{"0":{"23":2}}],["status=",{"0":{"43":3}}],["status",{"0":{"2":1,"15":3,"25":1,"30":1,"34":1,"43":5,"57":2}}],["staff",{"0":{"2":4,"3":1}}],["standard",{"0":{"0":3,"2":1,"9":1,"20":1,"25":2}}],["f1",{"0":{"57":1}}],["f7",{"0":{"57":1}}],["f8",{"0":{"57":2}}],["fddcd2fc",{"0":{"45":1}}],["fact",{"0":{"61":1}}],["faq",{"0":{"61":1}}],["fa",{"0":{"57":2}}],["favorite",{"0":{"56":2}}],["fashion",{"0":{"56":1}}],["faster",{"0":{"2":1}}],["false",{"0":{"45":1}}],["fake",{"0":{"32":3}}],["failing",{"0":{"61":1}}],["failures",{"0":{"61":3}}],["failed",{"0":{"56":1}}],["fails",{"0":{"56":1}}],["fail",{"0":{"29":2,"61":1}}],["fcf8238f2a56",{"0":{"26":1}}],["fc",{"0":{"18":1}}],["flavio",{"0":{"26":1}}],["flag",{"0":{"15":1}}],["flower",{"0":{"23":3}}],["flexible",{"0":{"0":1,"18":1}}],["func",{"0":{"63":5}}],["function",{"0":{"61":1}}],["functionality",{"0":{"29":1,"67":1}}],["functions",{"0":{"4":1,"61":1}}],["future",{"0":{"62":1}}],["fully",{"0":{"61":1}}],["fullchain",{"0":{"35":2}}],["full",{"0":{"9":3,"45":2,"47":1,"57":1}}],["fulfilled",{"0":{"2":1}}],["fs",{"0":{"3":1,"16":1,"46":3}}],["f",{"0":{"3":1,"6":1,"14":5,"15":3,"16":1,"17":1,"22":1,"30":1,"39":1,"45":1,"57":1,"63":1,"67":2}}],["few",{"0":{"25":1,"56":2,"61":1}}],["features",{"0":{"28":3,"34":1,"47":3,"56":1}}],["feature",{"0":{"21":1,"22":1}}],["fetchall",{"0":{"63":4
}}],["fetch",{"0":{"3":1,"6":1,"34":1,"56":1,"63":1}}],["federated",{"0":{"2":1}}],["federate",{"0":{"2":1}}],["federation",{"0":{"2":1}}],["fit",{"0":{"44":1}}],["fivetran",{"0":{"42":1}}],["fine",{"0":{"57":1}}],["finished",{"0":{"56":1}}],["final",{"0":{"40":1,"57":1}}],["finally",{"0":{"34":1,"40":1,"52":1,"56":1,"57":2,"66":1}}],["finalizers",{"0":{"61":1}}],["finalize",{"0":{"22":3}}],["find",{"0":{"2":1,"7":1,"18":1,"25":1,"32":1,"55":1,"57":2}}],["field",{"0":{"7":1,"22":5,"40":5,"44":1,"56":1}}],["fields",{"0":{"2":2,"22":1,"40":1,"68":1}}],["first",{"0":{"6":1,"15":1,"16":1,"22":1,"34":3,"39":1,"47":1,"56":3,"57":4,"61":1}}],["fixing",{"0":{"34":1}}],["fixes",{"0":{"28":1,"47":2}}],["fixtures",{"0":{"15":1}}],["fix",{"0":{"4":1,"34":3,"50":4}}],["fill",{"0":{"32":4}}],["filled",{"0":{"3":1,"16":1}}],["filtering",{"0":{"43":1}}],["filters",{"0":{"29":3,"56":1}}],["filter",{"0":{"23":1,"29":1,"32":2}}],["filtered",{"0":{"2":1}}],["fileutils",{"0":{"25":1}}],["file",{"0":{"2":2,"12":1,"15":1,"25":3,"45":1,"47":2,"53":1,"54":1,"56":6,"57":6,"59":2,"63":31,"64":1,"65":1,"67":1}}],["filesystem",{"0":{"2":1}}],["files",{"0":{"2":4,"3":2,"10":1,"12":1,"15":2,"16":1,"17":2,"25":1,"34":1,"39":1,"40":3,"46":2,"47":2,"57":25,"58":1}}],["four",{"0":{"56":1}}],["found",{"0":{"5":1,"57":1,"68":2}}],["folling",{"0":{"28":1}}],["followed",{"0":{"15":1,"56":1}}],["follows",{"0":{"3":1,"17":1,"22":1,"56":1}}],["follow",{"0":{"2":4,"3":1,"4":1,"22":3,"28":1,"39":1,"46":1,"57":1}}],["following",{"0":{"2":1,"4":2,"5":3,"6":1,"7":1,"14":1,"15":1,"20":1,"22":1,"29":3,"34":2,"45":1,"46":2,"47":1,"49":1,"52":1,"53":1,"56":3,"57":7,"62":1}}],["folder",{"0":{"4":2,"37":1,"47":1}}],["format",{"0":{"47":1,"57":6}}],["form",{"0":{"40":1,"48":1,"52":1}}],["forget",{"0":{"22":1,"40":1}}],["forceful",{"0":{"61":1}}],["force",{"0":{"12":1,"54":1}}],["forwarded",{"0":{"68":5}}],["forward",{"0":{"2":1,"27":1}}],["for",{"0":{"0":5,"1":1,"2":19,"3":4,"4":1,"5":6,"6":2,"7":7,"8":2,"9":9,"10":1,"12":1,"15":7,"17":4,"22":6,"23":1,"24":3,"25":2,"30":1,"33":2,"38":1,"39":2,"40":2,"41":1,"42":2,"44":1,"45":1,"46":3,"47":4,"49":2,"53":3,"56":16,"57":8,"58":1,"61":5,"62":6,"63":2,"66":1,"68":2}}],["framework",{"0":{"61":3}}],["frequently",{"0":{"61":1}}],["frequency",{"0":{"4":1}}],["free",{"0":{"0":1,"22":4}}],["from",{"0":{"2":10,"3":3,"5":1,"6":1,"7":1,"8":2,"11":1,"12":3,"15":6,"17":2,"18":1,"22":2,"23":2,"24":2,"25":1,"28":1,"29":1,"30":1,"34":5,"35":1,"39":1,"40":1,"41":2,"44":1,"45":2,"46":2,"47":2,"52":1,"56":9,"57":3,"61":2,"64":1,"67":1}}],["fron",{"0":{"1":1}}],["a0",{"0":{"57":1}}],["a9",{"0":{"57":1}}],["a9d047415b53",{"0":{"45":2}}],["a7",{"0":{"57":1}}],["ae",{"0":{"57":1}}],["aed",{"0":{"2":1,"3":1}}],["ah",{"0":{"57":1}}],["ahrx",{"0":{"1":1,"3":1,"4":1,"6":1,"15":1,"17":1,"39":4}}],["ag",{"0":{"46":1}}],["against",{"0":{"56":1}}],["again",{"0":{"46":1,"57":3,"66":2,"68":1}}],["agent",{"0":{"54":1,"56":2,"68":2}}],["age",{"0":{"15":2,"56":2}}],["a278",{"0":{"26":2}}],["aware",{"0":{"57":1}}],["awhile",{"0":{"49":1,"56":1}}],["awk",{"0":{"14":2,"38":1}}],["awswexnval0001",{"0":{"26":1}}],["awswcrnval001n",{"0":{"4":2}}],["awsaztirll000q",{"0":{"4":1}}],["aws",{"0":{"2":6,"4":1,"7":1,"9":2,"10":1,"14":7,"15":1,"17":2,"24":1,"26":11,"32":2,"33":3,"53":3,"56":1}}],["aio",{"0":{"14":1}}],["airfow",{"0":{"3":1}}],["airflow",{"0":{"2":10,"3":3,"9":1,"12":1,"16":4,"18":1,"21":4,"22":2,"28":3,"29":5,"30":1,"40":4,"42":3,"43":2,"56":5,"58":2,"63":48}}],["airbyte",{"0":{"2":3,"3":2,"12":1,"18":2,"22":2,"28":3,"42":2}}],[
"abb5",{"0":{"68":2}}],["able",{"0":{"57":1,"67":1}}],["absolute",{"0":{"34":1}}],["abstract",{"0":{"13":1}}],["about",{"0":{"22":1,"27":1,"56":2,"57":2}}],["above",{"0":{"7":1,"56":2,"57":6}}],["aks",{"0":{"12":2,"20":5,"45":1,"56":5}}],["affects",{"0":{"61":1}}],["affecting",{"0":{"59":1}}],["af14",{"0":{"12":2}}],["afterwards",{"0":{"57":1}}],["after",{"0":{"3":1,"4":1,"6":1,"10":1,"15":1,"16":1,"22":1,"32":5,"34":2,"46":1,"55":1,"57":7,"61":1,"66":1}}],["at",{"0":{"2":1,"5":1,"8":1,"13":1,"15":1,"22":1,"23":2,"40":1,"44":1,"51":1,"56":5,"61":2}}],["attempted",{"0":{"56":1}}],["attached",{"0":{"24":1}}],["attachments",{"0":{"24":2}}],["attachment",{"0":{"24":1}}],["attach",{"0":{"2":3}}],["attributes",{"0":{"2":1,"57":2}}],["am",{"0":{"56":1}}],["amd64",{"0":{"26":3}}],["amazon",{"0":{"14":4}}],["amazonaws",{"0":{"2":4,"25":13,"26":1}}],["amorer01",{"0":{"4":2}}],["amp",{"0":{"1":1,"2":1,"4":1,"7":2,"37":2,"44":1,"52":1,"56":6}}],["apr",{"0":{"57":5}}],["apache",{"0":{"21":2}}],["apt",{"0":{"13":1,"46":12}}],["apiserver",{"0":{"26":1}}],["apiversion",{"0":{"26":2,"27":1,"45":2}}],["api",{"0":{"2":3,"3":3,"9":2,"12":1,"14":2,"15":4,"16":3,"23":1,"35":1,"52":1,"53":3,"54":2,"56":13,"57":4,"59":1,"61":5}}],["appear",{"0":{"57":1}}],["appropriate",{"0":{"34":1,"57":1}}],["appropiate",{"0":{"15":1,"40":1}}],["app=core",{"0":{"15":1}}],["applewebkit",{"0":{"68":2}}],["apple",{"0":{"56":1}}],["applies",{"0":{"61":1}}],["applied",{"0":{"45":1,"56":1}}],["application",{"0":{"2":1,"4":5,"15":1,"54":1}}],["applying",{"0":{"61":1}}],["apply",{"0":{"14":5,"39":1,"45":1,"56":2,"61":2}}],["apps",{"0":{"6":1,"15":4,"17":1,"24":1,"39":1,"52":1}}],["app000300001207",{"0":{"4":1}}],["app",{"0":{"2":5,"4":5,"5":1,"7":13,"50":2,"52":4,"56":5}}],["appdevtools",{"0":{"2":1,"7":3,"42":1,"66":1}}],["average",{"0":{"23":1}}],["avoid",{"0":{"2":1,"61":1}}],["available",{"0":{"2":1,"47":1,"56":2,"61":1}}],["availability",{"0":{"0":1}}],["azt",{"0":{"2":1,"15":1}}],["az",{"0":{"2":2,"9":1,"12":5,"20":6,"45":5,"56":2}}],["azuread",{"0":{"52":4}}],["azurepubliccloud",{"0":{"26":1}}],["azure",{"0":{"0":1,"1":1,"12":6,"18":4,"20":1,"26":1,"32":3,"33":3,"45":2,"46":1,"52":3,"56":7}}],["auxwf",{"0":{"38":1}}],["aug",{"0":{"5":1,"57":1}}],["audit",{"0":{"50":5}}],["aud",{"0":{"2":1}}],["auth0",{"0":{"52":1}}],["auth",{"0":{"26":1}}],["authenticator",{"0":{"26":6}}],["authenticate",{"0":{"12":1,"68":3}}],["authentication",{"0":{"2":1,"9":1,"26":1,"44":1,"52":1,"66":1}}],["authority",{"0":{"26":1,"47":1,"68":1}}],["authoritative",{"0":{"3":1,"23":1}}],["authorization",{"0":{"2":1}}],["autogenerated",{"0":{"28":1}}],["autoscaler",{"0":{"20":1}}],["autoscaling",{"0":{"2":1,"9":1}}],["automatic",{"0":{"56":1}}],["automatically",{"0":{"5":1,"22":3,"45":1,"47":1,"49":1,"56":1}}],["automate",{"0":{"2":1,"42":1}}],["auto",{"0":{"0":1,"47":1}}],["aurora",{"0":{"2":2,"9":1}}],["adapter",{"0":{"67":1}}],["adapter=s3",{"0":{"53":1}}],["advantage",{"0":{"61":1}}],["adjustments",{"0":{"29":1,"52":1}}],["adjust",{"0":{"29":1}}],["adjusting",{"0":{"22":2}}],["aduser",{"0":{"26":2}}],["ad",{"0":{"7":1,"52":1}}],["adding",{"0":{"27":1,"40":1,"46":1,"56":1}}],["additional",{"0":{"2":2,"17":1,"34":1,"56":1}}],["adds",{"0":{"25":8}}],["addresses",{"0":{"27":2}}],["address",{"0":{"22":1,"27":1,"56":2,"61":1}}],["added",{"0":{"4":1,"25":1,"56":1}}],["add",{"0":{"2":1,"3":3,"15":2,"16":2,"20":6,"22":5,"27":2,"32":3,"33":1,"39":3,"40":2,"42":3,"46":4,"50":1,"52":4,"54":1,"56":7,"57":6,"67":4}}],["addons",{"0":{"2":1}}],["adw",{"0":{"2":2}}],["admin
istrator",{"0":{"20":2}}],["administrate",{"0":{"20":1,"56":1}}],["admin",{"0":{"0":1,"2":2,"4":1,"5":1,"7":7,"14":3,"20":1,"22":8,"31":1,"40":2,"52":1}}],["around",{"0":{"44":1,"56":1}}],["args=args",{"0":{"63":1}}],["args",{"0":{"26":1,"30":3,"63":7}}],["argocd",{"0":{"2":1,"3":1}}],["architecture",{"0":{"2":1}}],["arn",{"0":{"2":5,"9":2,"26":1,"53":2}}],["articles",{"0":{"44":1}}],["artifacory",{"0":{"11":1}}],["artifacts",{"0":{"53":1}}],["artifactrepo",{"0":{"8":1,"11":1,"65":1,"66":2}}],["artifact",{"0":{"8":1}}],["artifactory",{"0":{"2":1,"7":2,"11":4,"66":2}}],["artemiseks",{"0":{"2":2}}],["artemis",{"0":{"1":2,"2":3,"25":1}}],["aren",{"0":{"56":2}}],["area",{"0":{"5":1,"22":1,"56":1}}],["are",{"0":{"2":2,"3":3,"4":1,"8":1,"10":1,"11":1,"17":4,"22":4,"25":1,"28":4,"29":2,"30":1,"33":1,"34":4,"39":1,"40":6,"44":1,"45":1,"47":6,"49":2,"56":16,"57":6,"61":7,"62":3,"65":1,"68":1}}],["acmesh",{"0":{"35":1}}],["acme",{"0":{"35":4}}],["across",{"0":{"8":1}}],["activate",{"0":{"56":1,"57":1}}],["activity",{"0":{"56":1}}],["active",{"0":{"2":1}}],["actions",{"0":{"32":2,"39":1}}],["action",{"0":{"1":1,"2":5,"9":2,"32":2,"45":2,"53":2}}],["acces",{"0":{"66":1}}],["accessible",{"0":{"56":1}}],["accessing",{"0":{"56":2}}],["accessmodes",{"0":{"45":2}}],["accessed",{"0":{"2":1}}],["access",{"0":{"1":9,"2":13,"3":1,"4":3,"5":2,"6":1,"7":10,"14":1,"15":2,"16":1,"33":1,"39":1,"41":1,"42":1,"48":1,"52":2,"53":4,"56":11,"68":1}}],["accidentally",{"0":{"34":1}}],["according",{"0":{"56":1}}],["accordingly",{"0":{"2":1,"4":1,"17":1,"18":1,"22":2,"40":1,"48":1,"52":1,"53":1,"58":1}}],["accounts",{"0":{"22":5,"42":2,"52":1}}],["account",{"0":{"2":11,"5":2,"8":6,"12":1,"14":1,"18":1,"20":2,"22":15,"23":1,"32":6,"33":2,"42":1,"56":2,"66":3}}],["announcement",{"0":{"47":1}}],["annotate",{"0":{"8":2}}],["annotations",{"0":{"15":1}}],["annotation",{"0":{"0":1}}],["answers",{"0":{"44":1}}],["another",{"0":{"22":1,"34":1,"45":1}}],["analytics",{"0":{"2":4,"4":1,"28":1,"40":1}}],["anything",{"0":{"34":1,"64":1}}],["anymore",{"0":{"12":1}}],["any",{"0":{"2":3,"22":1,"28":2,"34":2,"39":2,"46":1,"49":2,"56":8,"57":3,"61":3,"64":1}}],["ank",{"0":{"2":2,"6":1,"17":2}}],["an",{"0":{"2":16,"3":1,"6":1,"7":1,"10":1,"11":1,"13":1,"15":3,"17":1,"20":3,"22":4,"23":1,"24":2,"30":1,"34":1,"35":1,"41":1,"44":1,"49":1,"52":1,"53":3,"56":7,"57":5,"61":3,"62":2,"64":1,"67":1}}],["and",{"0":{"0":3,"1":2,"2":11,"3":4,"4":4,"5":5,"6":2,"7":6,"8":2,"10":1,"12":4,"14":3,"15":15,"16":4,"17":2,"18":3,"22":25,"23":1,"24":4,"25":5,"26":1,"27":2,"28":3,"29":5,"30":1,"31":1,"32":10,"34":6,"35":7,"37":1,"39":2,"40":12,"41":2,"42":1,"43":1,"44":6,"45":5,"46":7,"47":4,"49":5,"50":1,"52":10,"54":1,"56":42,"57":25,"58":3,"59":5,"61":24,"62":2,"63":1,"64":1,"66":2,"67":2,"68":2}}],["alright",{"0":{"57":1}}],["already",{"0":{"3":1,"8":1,"15":2,"20":1,"29":1,"34":1,"41":1,"45":1,"56":2}}],["algorithm",{"0":{"57":4}}],["alarms",{"0":{"56":1}}],["alert",{"0":{"54":5}}],["alerts",{"0":{"54":2}}],["alphabet",{"0":{"24":1}}],["alpha2",{"0":{"14":2}}],["altered",{"0":{"56":1}}],["alternatively",{"0":{"3":1}}],["alter",{"0":{"2":1,"18":1,"56":1}}],["along",{"0":{"2":1}}],["allrows",{"0":{"63":2}}],["allows",{"0":{"56":1}}],["allow",{"0":{"2":5,"9":2,"22":1,"32":2,"44":1,"53":2,"56":2,"61":1,"68":1}}],["allowed",{"0":{"0":1,"56":3}}],["allocated",{"0":{"2":1,"9":1}}],["all",{"0":{"1":2,"3":1,"7":2,"8":2,"11":1,"15":1,"16":1,"22":2,"28":1,"31":1,"32":3,"34":2,"40":2,"41":2,"46":1,"47":4,"49":1,"56":8,"57":1,"58":1,"61":3,"63":6,"64":1}}],["also",{"0":{"0":1
,"2":1,"7":1,"15":1,"18":1,"22":2,"29":1,"35":1,"52":1,"56":3,"57":2,"59":1,"61":1}}],["aspect",{"0":{"49":1}}],["asc",{"0":{"41":3,"46":1}}],["asdf",{"0":{"4":2}}],["assuming",{"0":{"57":1}}],["assume",{"0":{"15":2}}],["assumerolewithwebidentity",{"0":{"2":1}}],["assemble",{"0":{"57":1}}],["assist",{"0":{"56":1}}],["associated",{"0":{"40":1}}],["associate",{"0":{"2":1}}],["asked",{"0":{"57":1}}],["ask",{"0":{"2":1,"3":1,"15":1,"20":2,"34":3,"47":1,"56":1}}],["asx",{"0":{"1":1,"3":1,"4":1,"6":1,"7":2,"15":1,"17":1,"39":4}}],["as",{"0":{"0":1,"1":1,"2":5,"3":6,"5":1,"6":1,"7":3,"10":2,"11":1,"12":2,"15":4,"17":3,"22":8,"28":1,"30":1,"33":1,"34":2,"35":2,"39":1,"40":1,"42":1,"44":1,"46":1,"47":3,"49":1,"52":1,"56":6,"57":9,"61":5,"67":1}}],["a",{"0":{"0":3,"1":1,"2":25,"3":4,"4":3,"5":3,"6":4,"7":1,"9":1,"10":2,"11":1,"12":1,"13":1,"14":2,"15":13,"16":1,"17":1,"20":11,"21":2,"22":21,"23":2,"24":3,"25":5,"26":1,"27":2,"28":2,"29":1,"30":2,"31":1,"32":2,"33":1,"34":16,"39":10,"40":6,"41":4,"42":1,"44":2,"45":3,"46":3,"47":2,"48":1,"52":4,"53":3,"55":2,"56":54,"57":15,"58":1,"59":4,"61":31,"62":1,"63":1,"64":1,"65":2,"66":1,"67":2,"68":2}}],["ok",{"0":{"57":1,"68":1}}],["okay",{"0":{"57":1}}],["o=digicert",{"0":{"57":9}}],["o=jsonpath=",{"0":{"2":1}}],["oomkilled",{"0":{"43":1}}],["ou=www",{"0":{"57":9}}],["ourselves",{"0":{"56":1}}],["our",{"0":{"24":1,"25":2,"28":2,"32":2,"33":1,"47":1,"51":1,"56":2,"57":2,"61":3,"67":1}}],["outside",{"0":{"65":1}}],["output",{"0":{"23":1,"39":1,"41":1,"56":2,"57":2}}],["outlined",{"0":{"15":1}}],["outdated",{"0":{"2":1,"3":1}}],["out",{"0":{"2":3,"3":2,"47":3,"49":1,"56":11,"57":10,"61":2,"66":1}}],["occurs",{"0":{"56":2}}],["occur",{"0":{"22":1}}],["oci",{"0":{"11":4}}],["obj",{"0":{"63":1}}],["objects",{"0":{"18":1,"32":5}}],["object",{"0":{"15":2,"32":2}}],["obsolete",{"0":{"25":1}}],["observability",{"0":{"9":1,"54":1}}],["observavility",{"0":{"9":1}}],["over",{"0":{"61":1}}],["overview",{"0":{"52":1,"56":1,"61":1}}],["override",{"0":{"21":5}}],["overwrite",{"0":{"15":1}}],["overall",{"0":{"2":1}}],["o",{"0":{"8":2,"13":1,"15":1,"45":1}}],["owned",{"0":{"18":1}}],["ownerreferences",{"0":{"61":1}}],["owner",{"0":{"2":1,"18":4,"22":1,"46":1}}],["own",{"0":{"8":1,"44":1,"61":2}}],["oidc",{"0":{"2":4}}],["opusfwngvhomii2aiyeptvnqdslqv59muxpui8r6aw",{"0":{"57":1}}],["op",{"0":{"27":2}}],["operations",{"0":{"61":1}}],["operation",{"0":{"61":2}}],["operator",{"0":{"15":10,"61":10,"67":1}}],["openssl",{"0":{"35":1,"57":22}}],["openssh",{"0":{"4":1}}],["open",{"0":{"4":1,"14":2,"56":1}}],["openid",{"0":{"2":2,"52":1}}],["option",{"0":{"54":2,"57":1,"67":2}}],["optional",{"0":{"0":2,"9":1,"12":1,"14":1,"35":1,"42":2}}],["opt",{"0":{"2":3,"3":1,"22":1}}],["oauth2",{"0":{"52":1}}],["oauth",{"0":{"2":2,"52":1}}],["old",{"0":{"1":1,"8":1,"20":1,"45":4,"47":1,"56":2}}],["other",{"0":{"0":1,"2":2,"5":1,"7":1,"9":1,"10":1,"15":1,"17":2,"32":3,"34":2,"56":3,"57":1,"59":1,"61":1}}],["os",{"0":{"0":1,"68":2}}],["orm",{"0":{"63":2}}],["origien",{"0":{"45":1}}],["origin",{"0":{"45":3}}],["original",{"0":{"34":1,"57":1}}],["orchestrate",{"0":{"42":1,"56":1}}],["orchestration",{"0":{"4":1,"42":1}}],["organized",{"0":{"62":1}}],["organization",{"0":{"56":1}}],["organizational",{"0":{"52":1}}],["org",{"0":{"21":1}}],["ordering",{"0":{"57":2}}],["order",{"0":{"7":3,"8":1,"22":1,"56":1,"57":3}}],["orrumcorp",{"0":{"12":3}}],["orrum",{"0":{"1":1,"12":18,"25":2,"27":5,"56":11,"57":20,"68":2}}],["or",{"0":{"0":3,"2":5,"3":1,"4":1,"6":1,"8":1,"9":1,"11":2,"15":3,"22":6,"23":2,"24":1,"28
":3,"29":2,"34":6,"40":3,"44":1,"47":1,"56":3,"57":2,"58":1,"61":5,"63":1,"65":1,"67":1,"68":1}}],["often",{"0":{"56":1,"61":1}}],["offline",{"0":{"49":1}}],["official",{"0":{"35":1}}],["off",{"0":{"23":1,"49":1,"51":2}}],["of",{"0":{"0":1,"2":8,"3":2,"4":3,"5":1,"9":1,"15":5,"18":1,"22":3,"23":1,"24":7,"25":2,"27":1,"28":1,"29":2,"30":1,"32":5,"34":2,"39":2,"40":1,"41":1,"47":6,"49":2,"52":1,"56":20,"57":11,"58":1,"61":16,"63":1,"64":1,"65":2,"68":1}}],["online",{"0":{"56":1}}],["only",{"0":{"2":1,"9":1,"22":2,"34":2,"39":4,"44":2,"45":1,"52":2,"56":1,"57":2,"61":1,"64":2}}],["onto",{"0":{"34":1}}],["onmicrosoft",{"0":{"12":3}}],["once",{"0":{"2":1,"6":2,"17":1,"22":4,"34":2,"40":1,"42":1,"52":1,"56":3,"57":2,"59":1,"61":1}}],["ones",{"0":{"2":1,"22":1,"40":1}}],["one",{"0":{"2":6,"3":1,"4":1,"7":2,"8":1,"9":2,"22":3,"23":2,"34":2,"40":1,"45":1,"53":1,"56":6,"57":4,"62":1}}],["onboarding",{"0":{"1":3}}],["on",{"0":{"0":1,"1":1,"2":14,"3":3,"4":6,"5":1,"6":1,"7":7,"8":1,"9":1,"10":2,"11":2,"12":5,"13":1,"14":3,"15":3,"16":1,"17":2,"18":5,"19":1,"20":1,"21":2,"22":22,"23":2,"24":1,"25":3,"26":1,"27":1,"28":2,"29":2,"30":3,"31":1,"32":6,"33":1,"34":2,"35":1,"36":1,"37":2,"38":2,"39":1,"40":6,"41":1,"42":12,"43":2,"44":1,"45":1,"46":4,"47":1,"48":2,"49":2,"50":1,"51":3,"52":8,"53":2,"54":3,"55":3,"56":13,"57":1,"58":3,"59":3,"60":1,"61":1,"62":1,"63":2,"64":2,"65":1,"66":2,"67":1,"68":4,"69":1}}]],"serializationVersion":2} \ No newline at end of file diff --git a/docs/client-docs/README.md b/docs/client-docs/README.md new file mode 100644 index 00000000..1ab5a0e0 --- /dev/null +++ b/docs/client-docs/README.md @@ -0,0 +1,37 @@ +# Customer clusters + +## jnj + +1. Artemis +2. Artemis dev +3. Ensemble +4. Ensemble test +5. RND + +### Requirements: +* Access to jnj workspace (worspace is provided by jnj and is personal) + * Request access: Onboarding (Noel) + * Check access: https://jnjitod.service-now.com/myworkspaces +* Repo with all configurations https://sourcecode.jnj.com/projects/asx-ahrx/repos/datacoves_deployment/browse (READ.me with all configurations). There will be a specific repo for each clusters (onboarding Noel). + * Request access: https://confluence.jnj.com/pages/viewpage.action?spaceKey=AHRX&title=How+to+request+access+to+Bitbucket+-+How+to+request+access+-+How+to+guides +* Access to Bastion + +## kenvue + +1. Chap dev +2. Chap production + +### Requirements: +* Access to kenvue microsoft remote desktop (provided by jnj and is personal) + * Request access: Onboarding (Noel) + * Check access: https://kenvue.sharepoint.com/ +* Repo is the same as jnj +* Access to Bastion + +## orrum +1. old +2. new + +### Requirements: +* Download VPN fron Azure (see client-docs instructions) +* Credentials in 1 password diff --git a/docs/client-docs/ccs/cluster-requirements-azure.md b/docs/client-docs/ccs/cluster-requirements-azure.md new file mode 100644 index 00000000..ac0f9b98 --- /dev/null +++ b/docs/client-docs/ccs/cluster-requirements-azure.md @@ -0,0 +1,97 @@ +# Summary for the requirements of a new Cluster. + +## Database (Azure Database for PostgreSQL - Flexible Server) + +### Minimum requirements + +- Version: 14 or later +- Workload Type: Production +- Compute+Storage: General Purpose, D4ds_v5 +- Geo-Redundancy and High Availability optional but recommended. +- Admin user/password required and must be provided to Datacoves. +- Storage Type: Premium SSD +- Storage Size: 128 GiB +- Performance Tier: P10 +- Storage auto growth enabled optional but recommended. 
+ +## Kubernetes Services + +### Configuration + +- Kubernetes version: 1.30.6 or later + +### Node pools +* general +* volumed +* workers - Standard_D4s_v3 node, 128 gig OS disk size + +### Worker groups + +* General +* Volumed +* Workers + +#### General + +- Standard_D4s_v3 +- min_nodes: 1 +- max_nodes: 4 +- root_volume_size: 128 +- labels: + +```yaml +labels: + ... + - key: k8s.datacoves.com/nodegroup-kind + value: general +``` + +#### Volumed + +- Standard_D16s_v5 +- min_nodes: 1 +- max_nodes: 4 +- root_volume_size: 512 +- labels: + +```yaml +labels: + ... + - key: k8s.datacoves.com/nodegroup-kind + value: volumed +``` + +#### Workers + +- min_nodes: 1 +- max_nodes: 4 +- root_volume_size: 128 +- labels: + +```yaml +labels: + ... + - key: k8s.datacoves.com/workers + value: enabled +``` + + +## Other configuration. + +#### SSL Certificate + +We recommend using a wildcard certificate, however we can also use cert manager for free certificates if that is the preference. + +Certificates must be issued for: + +- `*.domain.com` +- `domain.com` + +Where 'domain.com' is whatever base domain you wish to use. We recommend using "datacoves.YOUR_DOMAIN.YOUR_TLD", such as 'datacoves.mycompany.com'. In such a case, you would need certificates for: + +* `*.datacoves.mycompany.com` +* `datacoves.mycompany.com` + +#### DNS Configuration + +Either DNS must be configured to support the same wildcard and base domain, or the cluster must be allowed to create DNS entries via kubernetes' external-dns annotation. diff --git a/docs/client-docs/jnj/1-cluster-requirements.md b/docs/client-docs/jnj/1-cluster-requirements.md new file mode 100644 index 00000000..b794e220 --- /dev/null +++ b/docs/client-docs/jnj/1-cluster-requirements.md @@ -0,0 +1,313 @@ +# Datacoves cluster requirements + +[Summary for the requirements of a new Cluster.](./8-summary-requirements-new-cluster.md) + +## EKS cluster + +The clusters are created through CloudX pipelines, from `cluster.yaml` files ([docs](https://confluence.jnj.com/display/AGAP/Deploying+VPCx+EKS+Cluster)). +For every cluster there's a git repository with the cluster definition. If your +team create one of this repositories, please either grant access to datacoves staff so +we can make changes if required or ask us to check your `cluster.yaml`. + +An example repository of this kind is [itx-ank/ensemble](https://sourcecode.jnj.com/scm/itx-ank/ensemble). + +Important configuration to take into consideration: + +- Kubernetes version: latest confirmed working version. This is either -1 or -2 releases from current based on the time of year. +- Addons versions +- Worker groups: general, volumed, and workers. 
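+
+Once kubectl access is granted, a quick sanity check can confirm the Kubernetes version and that the worker groups carry the expected labels. This sketch assumes the node groups use the same `k8s.datacoves.com/...` label convention described in the Azure requirements; the context name is only an example:
+
+```shell
+# Check the server version reported by the cluster.
+kubectl --context itx-ank-ensemble-test version
+# List nodes with their node-group labels as extra columns.
+kubectl --context itx-ank-ensemble-test get nodes \
+  -L k8s.datacoves.com/nodegroup-kind -L k8s.datacoves.com/workers
+```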
+ +### Cluster configuration files + +| Cluster | Repository | Branch | +|------------------|--------------------------------------------------------------------------------------------|-------------| +| Ensemble test | https://sourcecode.jnj.com/projects/ITX-ANK/repos/ensemble/browse/_scm_cluster | test | +| Ensemble | https://sourcecode.jnj.com/projects/ITX-CCC/repos/ensemble/browse/_scm_cluster | production | +| R&D | https://sourcecode.jnj.com/projects/ITX-BHE/repos/integrationscluster/browse/_scm_cluster | test | +| Artemis Dev | https://sourcecode.jnj.com/projects/ITX-ADW/repos/artemiseks/browse/_scm_cluster | development | +| Artemis | https://sourcecode.jnj.com/projects/ITX-ADW/repos/artemiseks/browse/_scm_cluster | production | +| Chap development | https://sourcecode.jnj.com/projects/ITX-WCR/repos/datacove/browse/_scm_cluster | development | +| Chap production | https://sourcecode.jnj.com/projects/ITX-WCR/repos/datacove/browse/_scm_cluster | production | + +Once the cluster was provisioned, you'll receive an e-mail containing the details to configure `kubectl`. Please forward to the datacoves team. + +The installer will need kubectl access to the cluster [docs](https://confluence.jnj.com/display/AGAP/EKS+RBAC+Overview). + +### Opt out from EFS CSI driver + +The EFS CSI driver installed by cloudx is usually outdated (v1.0.0) so we need to opt out from the cloudx managed service. + +To opt out from EFS CSI managed driver, create a pull request on this repo, similar to this [one](https://sourcecode.jnj.com/projects/ITX-AED/repos/cloudx_container_pipelines_configs/pull-requests/257/diff#argocd/config.yaml). + +### External DNS + +In the cluster.yaml configuration there is a key `external_dns`. This key deploys the service [External DNS](https://github.com/kubernetes-sigs/external-dns) to the cluster, managed by CloudX. +This service might not be available in some clusters yet, so a manual configuration might be needed on Route53 or any other DNS service, typically a CNAME record pointing to the cluster's load balancer hostname. + +#### Getting load balancer's hostname + +```shell +kubectl -n ingress-nginx get svc ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].hostname}' +``` + +### SSL Certificates Manager + +CloudX will install [Cert Manager](https://cert-manager.io/) if the cluster supports it. + +If Cert Manager is not installed, 2 SSL certificates need to be issued manually: +- wildcard certificate: *.[SUBDOMAIN].[DOMAIN] +- root certificate: [SUBDOMAIN].[DOMAIN] + +A certificate chain file and a Private key are required for each certificate, please send the 4 files to Datacoves staff. + +## Git repositories + +### Config repo + +Each datacoves installation requires a configuration repo where Datacoves staff will store configuration details. + +Please create one repo per kubernetes cluster and grant access to Datacoves staff. + +### Dbt analytics repo + +This is the repo where your analytics (dbt) project resides, along with airflow dags, db security roles, documentation, etc. + +### Git Service Account + +Please create a Service Account with read access to the analytics repo, since that service account will be configured on services like Airflow and dbt-docs to read files from the repo. 
+ +To do so, submit a PR to have Cloudx stop managing the currently installed driver here: https://sourcecode.jnj.com/projects/ITX-AED/repos/cloudx_container_pipelines_configs/browse/argocd/config.yaml#19 + +This account will be also used by Jenkins to download images from artifactory (taqy-docker namespace), so please request access to `taqy-docker` on that account via AppDevTools. + +## Database + +Some services require Postgres databases, as described below. These databases can share an RDS instance or aurora cluster. You will need to create this database cluster/instance and ensure it can be accessed from the EKS cluster. + +### Minimum requirements + +- Engine: Postgres +- Version: 14.9 +- Multi-AZ: "Single DB Instance" for sandbox clusters, "Multi-AZ DB Cluster" if not. +- Master user: postgres +- Master password: +- Instance class: db.r5.large +- Storage type: Aurora Standard or gp2 +- Allocated_storage: 100GB +- Enable storage autoscaling +- Maximum storage threshold: 1TB +- Authentication: password + +Keep in mind that JNJ cycles the master password every 24 hours so you need to run any setup command using this password before that happens. + +### Initial database and user + +You'll need to create a master Postgres user and the datacoves database: + +```SQL +CREATE USER datacoves PASSWORD insert_generated_random_password_without_special_characters; +ALTER USER datacoves CREATEDB CREATEROLE; +GRANT datacoves TO postgres; +CREATE DATABASE datacoves OWNER datacoves; +REVOKE connect ON DATABASE datacoves FROM PUBLIC; +GRANT connect ON DATABASE datacoves TO datacoves; +GRANT connect ON DATABASE datacoves TO postgres; +``` + +A way to generate passwords: `python -c 'import secrets; print(secrets.token_urlsafe())'`. +Avoid special characters, they cause issues with some services, such as airflow. + +Please share this password with the Datacoves team. + +## Active Directory groups + +Roles/groups required for datacoves users: + +``` +JNJ-APP-{division}-DATACOVES-ADMIN +JNJ-APP-{division}-DATACOVES-DEVELOPER +JNJ-APP-{division}-DATACOVES-VIEWER +JNJ-APP-{division}-DATACOVES-KTLO +``` + +Substitute your `{division}`, e.g. `PCE`, `HMD`, `CHAP`, etc. + +## Ping identity account + +Submit a ticket to [Web Single Sign-On - SAML Federation](https://jnjprod.service-now.com/iris?id=sc_cat_item&sys_id=8fa9a4a4f88c81402b7d832c9cb96435&sysparm_category=a96e3c1f1d7ff100b7633835df15e2d1) +to create a ping account. + +### IRIS Request + +#### Short Description + +This is a request to enable SSO for cluster. + +#### Description + +Need to add PingID to application. + +#### Groups + +Need groups only filtered to ones that have the following pattern JNJ-APP--DATACOVES-\* + +#### Type + +Choose: OAuth/OpenID Connect + +#### Client id + +It should be any name for your cluster (e.g. `chapsbx`, `emea_ensemble_test`, `emea_artemis_dev`, etc.). + +#### Redirect urls + +`https://api.{cluster_domain}/complete/ping_federate` + +#### Additional fields + +Requires interactive electronic signatures using SSO: No +Attributes: groups, openid, profile, email + +When the Iris request is fulfilled, you will receive an email with: + +- Client ID (verify this is the one that was requested) +- Client Secret +- A list of OAuth endpoints + +Please share this information with the Datacoves team. + +## Airflow + +### EFS file system for airflow logs + +Follow the instructions to "Create EFS in AWS Account" from [this confluence page](https://confluence.jnj.com/display/AGAP/AWS+EFS+Persistent+Storage+-+AWS+EFS+CSI+Driver). 
Don't follow the other sections of the page. + +As a name use datacoves-[cluster id]-[environment slug]-airflow-logs. + +It's important to attach the right the EKS security group so the EKS cluster has access to the EFS filesystem. You can find the security group id in the EKS cluster admin page, Networking tab, under `Additional security groups`. + +### S3 bucket for Airflow dags + +Due to bitbucket scheduled downtimes we recommend using S3 as the DAGs store to mimimize disruptions. + +1. Create an S3 bucket per environment, i.e. datacoves-[cluster id]-[environment slug]-airflow-dags (datacoves-ensemble-pro001-airflow-dags) +2. Create an IAM policy that grants read/write access to the new S3 bucket created, use the same name convention used for the S3 bucket. +3. Follow [this instructions](https://confluence.jnj.com/display/AGAP/AWS+IAM+Roles+for+Kubernetes+Service+Accounts) to create an IAM Role, up to "Create IAM Role For K8s Service Account", attach the policy you created on step 2. Name the IAM role using the same convention you used for the S3 bucket +4. Do not associate the IAM role to a K8s Service Account, that part is managed by Datacoves. +5. Create a IAM user for jenkins to upload the dbt project and dags to S3. Use the same naming convention. Attach the same policy you created on step 2. + +#### Trusted policy example: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "Federated": "arn:aws:iam::327112934799:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/125EA29C302DF7DBB900ED84AA85F0BB" + }, + "Action": "sts:AssumeRoleWithWebIdentity", + "Condition": { + "StringLike": { + "oidc.eks.us-east-1.amazonaws.com/id/125EA29C302DF7DBB900ED84AA85F0BB:sub": "system:serviceaccount:dcw-dev123:dev123-airflow-*", + "oidc.eks.us-east-1.amazonaws.com/id/125EA29C302DF7DBB900ED84AA85F0BB:aud": "sts.amazonaws.com" + } + } + } + ] +} +``` + +## DBT API + +- Create an S3 bucket. +- Choose a bucket name, we suggest using _dbt_api where could be `ensemble`, `ensembletest`, etc. +- Create an IAM user with a policy to access the bucket, like the one below, + replacing `{your_bucket_name}` with your bucket's name. +- Create an access key for the user. Share it with the Datacoves team. + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:GetObjectVersion", + "s3:DeleteObject", + "s3:DeleteObjectVersion" + ], + "Resource": "arn:aws:s3:::{your_bucket_name}/*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket", + "s3:GetBucketLocation" + ], + "Resource": "arn:aws:s3:::{your_bucket_name}" + } + ] +} +``` + +## Grafana + +Grafana requires an S3 bucket with lifecycle management enabled. +Follow [this guide](grafana-loki-storage-config-providers.md) to configure it accordingly. + +## Airbyte + +- S3 bucket for airbyte logs, an IAM user with a policy to access it, and an + access key for the user. + +### S3 bucket for airbyte logs + +- Create an S3 bucket. +- Create an IAM user with a policy to access the bucket, like the one below, + replacing `{your_bucket_name}` with your bucket's name. +- Create an access key for the user. Share it with the Datacoves team. 
+ +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:GetObjectVersion", + "s3:DeleteObject", + "s3:DeleteObjectVersion" + ], + "Resource": "arn:aws:s3:::{your_bucket_name}/*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket", + "s3:GetBucketLocation" + ], + "Resource": "arn:aws:s3:::{your_bucket_name}" + } + ] +} +``` + +## Data warehouse connection templates + +Please define how your data warehouse architecture will look and define the connection templates for both Analytics Engineers and Services, I.e. on a Snowflake database you’ll need to specify fields such as account, warehouse, database, role. + +## Terraform + +Some work has been done (repo: [itx-azt/iac][iac]) to automate the creation of +these cluster requirements using terraform. However, because of authorization +restrictions imposed on terraform in jnj, it still requires manual +intervention. At the moment it is probably faster overall to do everything +manually. + +[iac]: https://sourcecode.jnj.com/projects/ITX-AZT/repos/iac diff --git a/docs/client-docs/jnj/2-configuration.md b/docs/client-docs/jnj/2-configuration.md new file mode 100644 index 00000000..5bbfdde9 --- /dev/null +++ b/docs/client-docs/jnj/2-configuration.md @@ -0,0 +1,83 @@ +# Configuring datacoves + +Configuration for each cluster is kept in a separate repository. They are +mounted as git submodules under `config/{cluster_domain}`. + +You will need to create this git repository if there isn't one already for your +cluster. Grant access to datacoves staff to this repo so we can initialize the +configuration files and add the people that will do configuration or deployment +to the git secret keyring. + +Clone this configuration to make changes to it. Alternatively, if you will run +the datacoves deployment from the same machine you can clone the datacoves_deployment +repository which has the configuration repos as [git submodules](https://git-scm.com/book/en/v2/Git-Tools-Submodules). + +```bash +git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git +cd datacoves_deployment +git submodule init +git submodule update config/$cluster_domain # Specify the path to the submodule to update. +cd config/$cluster_domain # Config repo cloned as submodule in here. +``` + +After the initial setup, the workflow to update configuration is as follows: + +```bash +# From within the cluster configuration repo. + +# 1. Fetch the latest configuration. +git checkout main +git pull +git secret reveal -f + +# 2. Make your changes (see below what's required). + +# 3. Commit and push your changes. +git secret hide +git diff # Review your changes, all sensitive data should be encrypted. +git add . +git commit -m 'Updated secrets/configuration.' +git push +``` + +## What values are required? + +Initially the configuration files will contain `TODO` comments to mark the +places where values need to be filled in. Run `grep -r . TODO` to see what's +pending. Remove the `TODO` comments when you add a value. Most values are used +to configure the external services that were created during[cluster setup](./1-cluser-requirements.md). + +The configuration variable names should give you an indication of what's needed. +If in doubt, ask. + +The requirements for each datacoves service follow. The list may be a useful +guide but it could be out of date. Please rely on the `TODO` marks, not on the +list, as authoritative information. 
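+
+For example, a minimal way to list the pending markers from within the cluster configuration repo (note the search pattern comes before the path; this assumes GNU grep is available on the workstation):
+
+```shell
+# Show every remaining TODO with file name and line number.
+grep -rn TODO .
+```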
+ +### Datacoves core + +- Datacoves api DB host (`DB_HOST`) and password (`DB_PASS`) in `secrets/core-api.env` +- PING_CLIENT_ID and PING_CLIENT_SECRET in `secrets/core-api.env` +- Ping group names in `cluster-params.yaml`, under `project`. +- Postgres DB Provisioner for services such as airbyte/airfow/superset in `cluster-params.secret.yaml` under `postgres_db_provisioner`. + +### DBT Docs + +- Deploy credentials in `cluster-params.secret.yaml` under `deploy_credentials`. + +### Airbyte + +Not yet documented. + +### Airflow + +The EFS CSI driver installed by cloudx is usually outdated (v1.0.0) so we need to opt out from the cloudx managed service. + +To do so, submit a PR to have Cloudx stop managing the currently installed driver here: https://sourcecode.jnj.com/projects/ITX-AED/repos/cloudx_container_pipelines_configs/browse/argocd/config.yaml#19 + + +- Airflow EFS volume_handle (fs id) in: `environments/dev123/airflow.secret.yaml` + +### Superset + +Not yet documented. diff --git a/docs/client-docs/jnj/3-configure-bastion-ec2-instance.md b/docs/client-docs/jnj/3-configure-bastion-ec2-instance.md new file mode 100644 index 00000000..771629fa --- /dev/null +++ b/docs/client-docs/jnj/3-configure-bastion-ec2-instance.md @@ -0,0 +1,98 @@ +# Configure Bastion EC2 instance + +## JNJ + +Name: +Host: AWSAZTIRLL000Q.jnj.com + +### SSH to instance + +1. In your AWS workspace/Microsoft Remote Desktop (windows) open a terminal `ssh 10.157.82.138 -m hmac-sha2-512` or +2. Create a shortcut to ssh pointing to `C:\Windows\System32\OpenSSH\ssh.exe 10.157.82.138 -m hmac-sha2-512` +3. Click on the shortcut and type your password to access the instance + +## CHAP + +Name: itx-wcr-EKS workstation +Host: awswcrnval001n.kenvue.com + +### Request role + +In your **Remote Desktop** go to [IAM](iam.kenvue.com): + +1. Request / Star a new request +2. Request the following roles: + - ITS-ITX-WCR-Datacove-Prd-K8sOperator + - ITS-ITX-WCR-Datacove-Prd-K8sMonitor + - ITS-ITX-WCR-Datacove-Prd-K8sAdmin + - ITS-EP-AWSWCRNVAL001N-LINUX-NA-UNIXSEAdmins +3. Details: + - Job role: Datacoves Support + - Application ID: APP000300001207 + - Application Name: DATACOVES-ANALYTICS PRODUCTION WORKBENCH FOR ELT & ORCHESTRATION + - Describe, in detail, the job functions you perform that REQUIRE this level of privilege: We maintain and support the Datacoves application which runs on Kubernetes. + - Is the Application Software (includes Web Components, Vendor Application), installed on the Server on which you are requesting Admin Access? No / Yes: No + - Frequency of Need: Weekly +4. Submit + +### SSH to instance + +1. On the terminal run command `ssh 10.79.29.123` +2. Your user should be added to the following groups in `/etc/groups` + +## Create your working directory + +Create your working directory under `/app/users`, i.e. `/app/users/ssassi`. + +### Grant you access to docker + +```shell +sudo su - +vi /etc/group +``` + +Example: + +```shell +datacoves:x:8653:amorer01, # To chap +docker:x:187:amorer01, +``` + +### Configure your home folder (~) + +1. Copy the contents of `/app/users/datacoves-home-template` to your home folder: + +```shell +cp -R /app/users/datacoves-home-template/. ~/ +``` + +2. Exit and reconnect to the instance to ensure that the `.bashrc` script was ran accordingly +3. Fix kubelogin permissions + +```shell +asdf uninstall kubelogin +asdf install kubelogin +``` + +5. 
Configure your credentials to the clusters + +```shell +kc config get-contexts +kc config use-context +kc get ns +``` + +Note: you'll need to change your ~/.kube/config permissions: + +```shell +chmod 600 ~/.kube/config +``` + +## Clone datacoves deployment repo + +```shell +/app/users/ +git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git +``` + +After clonning, follow the instructions to reveal secrets and install requirements. diff --git a/docs/client-docs/jnj/4-configure-bitbucket-and-jenkins.md b/docs/client-docs/jnj/4-configure-bitbucket-and-jenkins.md new file mode 100644 index 00000000..89e237a8 --- /dev/null +++ b/docs/client-docs/jnj/4-configure-bitbucket-and-jenkins.md @@ -0,0 +1,88 @@ +# How to configure Bitbucket project and connect it with Jenkins project + +## Bitbucket + +### Ensure you enable the following hooks in your Bitbucket project + +- JnJ VPCx - Post Receive Repository Hook for SCM +- Webhook to Jenkins for Bitbucket Server + +![Bitbucket hooks](img/bitbucket-project-settings-hooks.png) + +### JnJ VPCx - Post Receive Repository Hook for SCM + +![Post Receive Repository Hook for SCM](img/bitbucket-project-settings-post-receive-repository-hook.png) + +### Webhook to Jenkins for Bitbucket Server + +#### Tab 1 + +![Webhook tab 1](img/bitbucket-project-settings-jenkins-webhook-tab1.png) + +#### Tab 2 + +![Webhook tab 2](img/bitbucket-project-settings-jenkins-webhook-tab2.png) + +#### Tab 3 + +![Webhook tab 3](img/bitbucket-project-settings-jenkins-webhook-tab3.png) + +### Enable the following Merge Checks + +![Merge Checks](img/bitbucket-project-settings-merge-checks.png) + +### Request access to taqy-docker for the project service account + +Typically the service account created automatically is `sa-itsus--devusr`. + +Go to App Dev Tools and request access for that user, like so: + +![App Dev Tools](img/app-dev-tools-artifactory-sa.png) + +## Jenkins + +### Ensure Bitbucket plugins were correctly configured + +Navigate to Manage Jenkins -> Configure System and modify the following plugins: + +![Bitbucket Server](img/jenkins-configuration-bitbucket-server.png) + +![Bitbucket Notifier](img/jenkins-configuration-bitbucket-notifier.png) + +### Create Multibranch pipeline project + +At Home page -> "+ New Item": + +![Multibranch pipeline project](img/jenkins-create-multi-branch-pipeline.png) + +### Configure branch sources + +![Branch sources](img/jenkins-settings-branch-sources.png) + +### Configure repo behaviors + +![Branch repo behaviors](img/jenkins-settings-behaviors.png) + +### Set up build configuration and other items + +![Build configuration](img/jenkins-settings-build-configuration.png) + +![Other items](img/jenkins-settings-other.png) + +## Jenkinsfile dependencies + +You'll need a credential that stores the secrets used to connect to your Data Warehouse. + +Create a new credential in the Jenkins Admin area. As of Aug. 
'23 those can be found in: + +`Dashboard -> Credentials -> System -> Global Credentials (unrestricted)` + +![New credential](img/jenkins-add-new-credential-0.png) + +![New credential](img/jenkins-add-new-credential.png) + + +## Known issues + + +* When "pre hook declined" it could be due to JIRA issues configuration: from settings -> `Jira Issues` select "Use custom settings" and be sure "Don't need a Jira issue key" is selected \ No newline at end of file diff --git a/docs/client-docs/jnj/5-deployment.md b/docs/client-docs/jnj/5-deployment.md new file mode 100644 index 00000000..f4bd0e8b --- /dev/null +++ b/docs/client-docs/jnj/5-deployment.md @@ -0,0 +1,38 @@ +## How to deploy (or update) datacoves to a kubernetes cluster + +Prerequisites: [cluster and external resources setup](./1-cluster-requirements). + +SSH into a machine with kubectl access to the cluster from where you will run +the installation scripts. Then: + +```bash +# Set these as needed for your cluster. +cluster_domain=FILL_IN # e.g. ensembletest.apps.jnj.com +kubectl_context=FILL_IN # e.g. itx-ank-ensemble-test + +# Clone the repository into the installation workstation (required once). +git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git +cd datacoves_deployment + +# Install python dependencies for the installation scripts (required once). +pip3 install --user -r requirements.txt + +# Fetch the latest changes and reveal the secrets in the config submodule directory. +git pull +git submodule update --init +(cd config/$cluster_domain; git secret reveal -f) + +# Install datacoves base dependencies into the cluster (ingress-nginx, etc.) +# Usually not required after the first time datacoves is released to a cluster. +./cli.py setup_base $kubectl_context $cluster_domain + +# Deploying ingress-nginx will create an ELB. Use the following command to retrieve it's URL. +kubectl --context $kubectl_context get -A svc | grep LoadBalancer + +# Update cluster-params.yaml setting external_dns_url to that URL. +$EDITOR config/$cluster_domain/cluster-params.yaml +# Commit the change. + +# Install/update datacoves. +./cli.py install +``` diff --git a/docs/client-docs/jnj/6-access.md b/docs/client-docs/jnj/6-access.md new file mode 100644 index 00000000..81c824fb --- /dev/null +++ b/docs/client-docs/jnj/6-access.md @@ -0,0 +1,72 @@ +# Access Datacoves on JNJ clusters + +## Requesting Roles + +> **NOTE:** Please inform Martin Ryan before requesting appdevtools roles + +In order to have access to all third-party tools (Bitbucket, Jenkins, Artifactory, etc.) you must request specific roles. + +To do so, you must go to https://appdevtools.jnj.com and request the `Datacoves Support` Model User template. + +Make sure to write `Needed for Datacoves platform support` as requesting reason. + +![AppDevTools User Template](img/app-dev-tools-user-model.png) + +## Datacoves Access + +In order to get access as an admin and developer on the different JNJ clusters you need to request the following AD groups: + +### Medical Devices + +- JNJ-APP-HMD-DATACOVES-ADMIN +- JNJ-APP-HMD-DATACOVES-DEVELOPER + +### Consumer Health / Kenvue + +Go to iam.kenvue.com, search for `Datacoves`. There's one role for Admin and one per Environment. You need to request `ADMIN`, `*-DEVELOPER` and `*-SYSADMIN` roles. 
For example: + +- ITS_APP_DATACOVES_ADMIN +- ITS_APP_DATACOVES_DEV_CBI_VMA_DEVELOPER +- ITS_APP_DATACOVES_DEV_CBI_VMA_SYSADMIN +- ITS_APP_DATACOVES_DEV_COMX_CUST_DEVELOPER +- ITS_APP_DATACOVES_DEV_COMX_CUST_SYSADMIN +- ITS_APP_DATACOVES_DEV_COMX_GLOBAL_DEVELOPER + +### Ensemble + +- JNJ-APP-PCE-DATACOVES-ADMIN +- JNJ-APP-PCE-DATACOVES-DEVELOPER + +### R&D + +- ITS-APP-DEV-JRDDATACOVES-ADMIN +- ITS-APP-DEV-JRDDATACOVES-DEVELOPER + +## taqy Access + +taqy is the docker repository used by all J&J instances. Access to it is necessary in order to manage images on it. + +To request access, use https://appdevtools.jnj.com + +- Request Access, By User, Other +- Enter your username +- Tool: EAT Jenkins and Artifactory +- Team: taqy +- Reason for request: Access to CI images for DataCoves +- Grant these roles: ITS-ASx-TAQY-DEV-Executors, ITS-ASx-TAQY-DEV-Viewers + +For reference, the main Ensemble Jenkins user is sa-itsus-jbfl-devusr + +## Snowflake Access + +As done with the groups above, you must also request `JNJ-APP-PCE-SNOWFLAKE-EMEA-DEVELOPER` + +### How to request the groups? + +Using the AWS workspace: + +1. Navigate to https://iam.jnj.com +2. Click on `Start new request` +3. Type the group name on the `Find a service item` search box. +4. Click on `Request` button +5. In the popup, leave `Valid from` and `Valid until` empty, in the `reason` field type "Datacoves support team". diff --git a/docs/client-docs/jnj/7-configure-sa-docker-in-kubernates.md b/docs/client-docs/jnj/7-configure-sa-docker-in-kubernates.md new file mode 100644 index 00000000..3fc5e09e --- /dev/null +++ b/docs/client-docs/jnj/7-configure-sa-docker-in-kubernates.md @@ -0,0 +1,56 @@ +# How to configure Service Account Docker in Kubernetes for pull images. + +**JnJ** and **Kenvue** are using their own private Docker artifact repositories. In order to download images from those repositories in Kubernetes we need to create secrets with valid credentials in each Kubernetes cluster. + +This process is documented by JnJ at [conflunce](https://confluence.jnj.com/display/EAKB/Artifactory+-+Docker+Image+Pull+Through+Cache+For+Trusted+Public+Registries). + + +## Select Kubernates context + +```bash +kubectl config get-contexts +kubectl config use-context +``` + +## Delete old service account (If it already exists) + +```bash +kubectl get secrets -n default +kubectl delete secret taqy-docker -n default +``` + +## Create new service account + +```bash +# Create secret in default namespace - Recommended to use the EAT service account username and password for credentials +kubectl create secret docker-registry taqy-docker --docker-server=jnj.artifactrepo.jnj.com --docker-username= --docker-password= -n default + +# Annotate secret to sync across all namespaces +kubectl annotate secret taqy-docker cluster.managed.secret="true" -n default +``` + +## Inspect the new secret + +```bash +kubectl -n default get secret taqy-docker -o yaml +``` + +Copy the value from `data.dockerconfigjson` + +```bash +echo | base64 -d +``` + +Note: Check that the secrets have been replicated to all namespaces. 
(Can check one or two) + +``` +kubectl -n get secret taqy-docker -o yaml +echo | base64 -d +``` + +If the secret was not replicated, check the pod's logs: + +```bash +kubectl -n kube-system get pods +kubectl -n kube-system logs namespace-secrets-sync- --tail 100 +``` \ No newline at end of file diff --git a/docs/client-docs/jnj/8-summary-requirements-new-cluster.md b/docs/client-docs/jnj/8-summary-requirements-new-cluster.md new file mode 100644 index 00000000..d6787892 --- /dev/null +++ b/docs/client-docs/jnj/8-summary-requirements-new-cluster.md @@ -0,0 +1,113 @@ +# Summary for the requirements of a new Cluster. + +For more details check [Cluster requirements](./1-cluster-requirements.md) + +## Database (RDS) + +### Minimum requirements + +- Engine: Postgres +- Version: 14.9 +- Multi-AZ DB Cluster. +- Master user: postgres +- Master password: +- Instance class: db.r5.large +- Storage type: Aurora Standard or gp2 +- Allocated_storage: 100GB +- Enable storage autoscaling +- Maximum storage threshold: 1TB +- Authentication: password + + +## EKS + +### Configuration + +- External DNS. +- `m5.xlarge` instances. + +### Worker groups + +* General +* Volumed +* Workers + +#### General + +- min_nodes: 1 +- max_nodes: 30 +- root_volume_size: 200 +- labels: + +```yaml +labels: + ... + - key: k8s.datacoves.com/nodegroup-kind + value: general +``` + +#### Volumed + +- min_nodes: 1 +- max_nodes: 30 +- root_volume_size: 200 +- labels: + +```yaml +labels: + ... + - key: k8s.datacoves.com/nodegroup-kind + value: volumed +``` + +#### Workers + +- min_nodes: 1 +- max_nodes: 30 +- root_volume_size: 200 +- labels: + +```yaml +labels: + ... + - key: k8s.datacoves.com/workers + value: enabled +``` + + +## Other configuration. + +- EFS for each environment for **Airflow Logs**. +- S3 buckets for each environment for **Dags sync**, with read-only permissions. (Optional. Can be git-sync). +- One S3 bucket for **Observavility stack**. Example `ensemble-prd-observability-grafana-loki`. (Full permissions) +- One S3 bucket for **dbt-api**. Example `ensemble-prd-dbt-api`. (Full permissions) + + +### Example for full S3 bucket permission + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:GetObjectVersion", + "s3:DeleteObject", + "s3:DeleteObjectVersion" + ], + "Resource": "arn:aws:s3:::{your_bucket_name}/*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket", + "s3:GetBucketLocation" + ], + "Resource": "arn:aws:s3:::{your_bucket_name}" + } + ] +} +``` \ No newline at end of file diff --git a/docs/client-docs/jnj/README.md b/docs/client-docs/jnj/README.md new file mode 100644 index 00000000..cbd13949 --- /dev/null +++ b/docs/client-docs/jnj/README.md @@ -0,0 +1,14 @@ +# Datacoves deployment + +This repository contains the datacoves installation scripts. They install +datacoves to an existing EKS cluster, based on the configuration files in the +`config` directory. Configuration for each cluster is kept in a separate +repository. They are mounted as git submodules under `config/{cluster_domain}`. + +Before running the installation scripts the EKS cluster and other required AWS +resources must be created. See [cluster requirements](./1-cluster-requirements.md). + +Then a repository to use as the cluster configuration submodule must be created. +See [configuration](./2-configuration.md). + +After that, deployment can begin. See [deployment](./3-deployment.md). 
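+
+For orientation, pulling the configuration submodule for a given cluster and revealing its secrets typically looks like this (a short sketch; `$cluster_domain` is a placeholder, and the full workflow is described in the configuration page):
+
+```bash
+# From the root of the datacoves_deployment checkout:
+git submodule update --init config/$cluster_domain
+(cd config/$cluster_domain && git secret reveal -f)  # decrypt the configuration files
+```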
diff --git a/docs/client-docs/jnj/img/app-dev-tools-artifactory-sa.png b/docs/client-docs/jnj/img/app-dev-tools-artifactory-sa.png new file mode 100644 index 00000000..a3c33b68 Binary files /dev/null and b/docs/client-docs/jnj/img/app-dev-tools-artifactory-sa.png differ diff --git a/docs/client-docs/jnj/img/app-dev-tools-user-model.png b/docs/client-docs/jnj/img/app-dev-tools-user-model.png new file mode 100644 index 00000000..c7188869 Binary files /dev/null and b/docs/client-docs/jnj/img/app-dev-tools-user-model.png differ diff --git a/docs/client-docs/jnj/img/bitbucket-project-settings-hooks.png b/docs/client-docs/jnj/img/bitbucket-project-settings-hooks.png new file mode 100644 index 00000000..f2e77ac3 Binary files /dev/null and b/docs/client-docs/jnj/img/bitbucket-project-settings-hooks.png differ diff --git a/docs/client-docs/jnj/img/bitbucket-project-settings-jenkins-webhook-tab1.png b/docs/client-docs/jnj/img/bitbucket-project-settings-jenkins-webhook-tab1.png new file mode 100644 index 00000000..c4c53243 Binary files /dev/null and b/docs/client-docs/jnj/img/bitbucket-project-settings-jenkins-webhook-tab1.png differ diff --git a/docs/client-docs/jnj/img/bitbucket-project-settings-jenkins-webhook-tab2.png b/docs/client-docs/jnj/img/bitbucket-project-settings-jenkins-webhook-tab2.png new file mode 100644 index 00000000..a046e9b4 Binary files /dev/null and b/docs/client-docs/jnj/img/bitbucket-project-settings-jenkins-webhook-tab2.png differ diff --git a/docs/client-docs/jnj/img/bitbucket-project-settings-jenkins-webhook-tab3.png b/docs/client-docs/jnj/img/bitbucket-project-settings-jenkins-webhook-tab3.png new file mode 100644 index 00000000..9bec4057 Binary files /dev/null and b/docs/client-docs/jnj/img/bitbucket-project-settings-jenkins-webhook-tab3.png differ diff --git a/docs/client-docs/jnj/img/bitbucket-project-settings-merge-checks.png b/docs/client-docs/jnj/img/bitbucket-project-settings-merge-checks.png new file mode 100644 index 00000000..60e9ba42 Binary files /dev/null and b/docs/client-docs/jnj/img/bitbucket-project-settings-merge-checks.png differ diff --git a/docs/client-docs/jnj/img/bitbucket-project-settings-post-receive-repository-hook.png b/docs/client-docs/jnj/img/bitbucket-project-settings-post-receive-repository-hook.png new file mode 100644 index 00000000..5993e6d4 Binary files /dev/null and b/docs/client-docs/jnj/img/bitbucket-project-settings-post-receive-repository-hook.png differ diff --git a/docs/client-docs/jnj/img/jenkins-add-new-credential-0.png b/docs/client-docs/jnj/img/jenkins-add-new-credential-0.png new file mode 100644 index 00000000..e90a5429 Binary files /dev/null and b/docs/client-docs/jnj/img/jenkins-add-new-credential-0.png differ diff --git a/docs/client-docs/jnj/img/jenkins-add-new-credential.png b/docs/client-docs/jnj/img/jenkins-add-new-credential.png new file mode 100644 index 00000000..68316cab Binary files /dev/null and b/docs/client-docs/jnj/img/jenkins-add-new-credential.png differ diff --git a/docs/client-docs/jnj/img/jenkins-configuration-bitbucket-notifier.png b/docs/client-docs/jnj/img/jenkins-configuration-bitbucket-notifier.png new file mode 100644 index 00000000..6ba81c0b Binary files /dev/null and b/docs/client-docs/jnj/img/jenkins-configuration-bitbucket-notifier.png differ diff --git a/docs/client-docs/jnj/img/jenkins-configuration-bitbucket-server.png b/docs/client-docs/jnj/img/jenkins-configuration-bitbucket-server.png new file mode 100644 index 00000000..e176ecfe Binary files /dev/null and 
b/docs/client-docs/jnj/img/jenkins-configuration-bitbucket-server.png differ diff --git a/docs/client-docs/jnj/img/jenkins-create-multi-branch-pipeline.png b/docs/client-docs/jnj/img/jenkins-create-multi-branch-pipeline.png new file mode 100644 index 00000000..0f62ec7a Binary files /dev/null and b/docs/client-docs/jnj/img/jenkins-create-multi-branch-pipeline.png differ diff --git a/docs/client-docs/jnj/img/jenkins-settings-behaviors.png b/docs/client-docs/jnj/img/jenkins-settings-behaviors.png new file mode 100644 index 00000000..7d923c20 Binary files /dev/null and b/docs/client-docs/jnj/img/jenkins-settings-behaviors.png differ diff --git a/docs/client-docs/jnj/img/jenkins-settings-branch-sources.png b/docs/client-docs/jnj/img/jenkins-settings-branch-sources.png new file mode 100644 index 00000000..77487eba Binary files /dev/null and b/docs/client-docs/jnj/img/jenkins-settings-branch-sources.png differ diff --git a/docs/client-docs/jnj/img/jenkins-settings-build-configuration.png b/docs/client-docs/jnj/img/jenkins-settings-build-configuration.png new file mode 100644 index 00000000..f8fcbb35 Binary files /dev/null and b/docs/client-docs/jnj/img/jenkins-settings-build-configuration.png differ diff --git a/docs/client-docs/jnj/img/jenkins-settings-other.png b/docs/client-docs/jnj/img/jenkins-settings-other.png new file mode 100644 index 00000000..bed780c1 Binary files /dev/null and b/docs/client-docs/jnj/img/jenkins-settings-other.png differ diff --git a/docs/client-docs/kenvue/how-to-setup-helm-chart.md b/docs/client-docs/kenvue/how-to-setup-helm-chart.md new file mode 100644 index 00000000..211cc2b2 --- /dev/null +++ b/docs/client-docs/kenvue/how-to-setup-helm-chart.md @@ -0,0 +1,46 @@ +# How to set up Helm Chart on kenvue + +Artifacory: https://kenvue.jfrog.io +Repository: dco-helm +Credentials: See 1password +Protocol: OCI + +Steps: + +1. Artifactory login. +2. Download or build the helm chart. +3. Upload the new helm chart. +4. Check the new helm chart. +5. Install the helm chart. + +## 1. Artifactory login + +```bash +helm registry login https://kenvue.jfrog.io/dco-helm +``` + +## 2. Build or download the helm chart. + +In this case as an example we are going to download a helm chart from the JnJ artifactory + +```bash +wget --user --password https://artifactrepo.jnj.com:443/artifactory/jnj-helm-charts/metrics-server-3.12.2.tgz +``` + +## 3. Upload the new helm chart. + +```bash + helm push metrics-server-3.12.2.tgz oci://kenvue.jfrog.io/dco-helm/metrics-server +``` + +## 4. Check the new helm chart. + +```bash +helm show all oci://kenvue.jfrog.io/dco-helm/metrics-server +``` + +## 5. Install the helm chart. + +```bash +helm install my-release oci://kenvue.jfrog.io/dco-helm/metrics-server --version 3.12.2 +``` diff --git a/docs/client-docs/orrum/README.md b/docs/client-docs/orrum/README.md new file mode 100644 index 00000000..4bdc5e0f --- /dev/null +++ b/docs/client-docs/orrum/README.md @@ -0,0 +1,75 @@ + +# Datacoves deployment + +This section contains instructions on how to connect to Orrum infra via azure to build, maintain, and monitor datacoves deployments. + +# VPN Connection + +kubectl requires connection to Orrum VPN. Download [Azure VPN Client](https://apps.apple.com/us/app/azure-vpn-client/id1553936137?mt=12). + +The profile can be downloaded from Azure; login with Support_Datacoves@orrumcorp.onmicrosoft.com with the credentials from 1Password. 
+ +https://portal.azure.com/#@orrum.com/resource/subscriptions/0f8e4c48-c319-4ed9-af14-ef50501e3a41/resourceGroups/DataCoves/providers/Microsoft.Network/virtualNetworkGateways/DataCovesGateway/pointtositeconfiguration + +Click "Download VPN client" in the header, and you will get a zip file with the profile files; you will want the Azure client profiles, and you can use the Import button in the Azure client to import it. + + +To connect to the vpn, use Support_Datacoves@orrumcorp.onmicrosoft.com, credentials on 1Password. + +## kubectl setup + +```shell +# Ensure Python is Installed +pipx install az-cli --include-deps + +# Get login password from 1pswd +az login -u Support_Datacoves@orrumcorp.onmicrosoft.com + +# Install kubectl + kubelogin +az aks install-cli + +# Set subscription +az account set --subscription 0f8e4c48-c319-4ed9-af14-ef50501e3a41 + +# Get credentials for new cluster +az aks get-credentials --resource-group DataCoves --name Datacoves_kube + +# List contexts +kubectl config use-context Datacoves_kube +``` + +## Rename Context + +It is very important that the context be named orrum-new as things such as updating the cluster will have scripts that depend on the context name. + +``` +kubectl config rename-context Datacoves_kube orrum-new +kubectl config use-context orrum-new +``` + +Now verify connectivity with `kubectl get ns` + +## Config DNS on `/etc/hosts` (Optional) + +Note: This is probably not necessary anymore. + +You can force the domain and subdomains DNS if it's not configured. + +``` +10.10.0.36 datacoves.orrum.com +10.10.0.36 api.datacoves.orrum.com +10.10.0.36 authenticate-dev123.datacoves.orrum.com +10.10.0.36 dev123.datacoves.orrum.com +10.10.0.36 airbyte-dev123.datacoves.orrum.com +10.10.0.36 dbt-docs-dev123.datacoves.orrum.com +10.10.0.36 airflow-dev123.datacoves.orrum.com +10.10.0.36 superset-dev123.datacoves.orrum.com +10.10.0.36 grafana.datacoves.orrum.com + +# +10.10.0.36 -1-transform-dev123.datacoves.orrum.com +10.10.0.36 -1-dbt-docs-dev123.datacoves.orrum.com +10.10.0.36 -transform-dev123.datacoves.orrum.com +``` + +*Note: Check the cluster's Public IP `10.10.0.36`* diff --git a/docs/default.conf b/docs/default.conf new file mode 100644 index 00000000..80aef269 --- /dev/null +++ b/docs/default.conf @@ -0,0 +1,14 @@ +server { + listen 8080 default_server ssl; + server_name devdocs.datacoves.com; + + ssl_certificate /etc/nginx/fullchain.pem; + ssl_certificate_key /etc/nginx/privkey.pem; + + location / { + root /usr/share/nginx/html; + index index.html; + auth_basic "Who Goes There?!"; + auth_basic_user_file /etc/nginx/htpasswd; + } +} diff --git a/docs/dev-logs/2021-06-create-er-diagram.md b/docs/dev-logs/2021-06-create-er-diagram.md new file mode 100644 index 00000000..d5c83ec5 --- /dev/null +++ b/docs/dev-logs/2021-06-create-er-diagram.md @@ -0,0 +1,12 @@ +# How to create an entity relationship diagram + +```bash +./cli.py pod-sh +apt install graphviz-dev +pip3 install pygraphviz +./manage.py graph_models -a -X *Mixin,Abstract*,ContentType,Session,Nonce,Partial,TokenProxy -g -o core-erd.png +``` + +Learn more at https://django-extensions.readthedocs.io/en/latest/graph_models.html + +![ER diagram](img/core-erd.png) diff --git a/docs/dev-logs/2021-09-eks-setup.md b/docs/dev-logs/2021-09-eks-setup.md new file mode 100644 index 00000000..69e76538 --- /dev/null +++ b/docs/dev-logs/2021-09-eks-setup.md @@ -0,0 +1,110 @@ +# Installation + +## Set up IAM user + +IAM user needs the following privileges to create the cluster: + 
+https://eksctl.io/usage/minimum-iam-policies/ + +## AWS CLI + +Install AWS CLI in your local environment + +https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html + +## Configure credentials + +1. Generate access key +2. Configure your credentials + +## Install eksctl + +Install eksctl + +https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html + +### On Mac + +``` +brew tap weaveworks/tap +brew install weaveworks/tap/eksctl +``` + +## Create cluster + +https://eksctl.io/usage/creating-and-managing-clusters/ + +``` +eksctl create cluster -f cluster.yaml --tags service=datacoves +``` + +## Install metrics server + +https://docs.aws.amazon.com/eks/latest/userguide/metrics-server.html + +``` +kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml +``` + +## Kubernetes dashboard + +https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html + +``` +kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.5/aio/deploy/recommended.yaml +kubectl apply -f eks-admin-service-account.yaml +``` + +### Open dashboard + +``` +kubectl proxy +``` + +http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#!/login + +Get a login token with: + +``` +kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}') +``` + + +## Configure Docker hub + +``` +kubectl create ns alpha2 +kubectl create secret docker-registry docker-secret \ +--docker-server="https://index.docker.io/v1/" \ +--docker-username="" \ +--docker-password="" \ +--docker-email="" \ +--namespace="alpha2" +``` + + +## EKS (k8s on AWS) + + +```sh +# Create the cluster https://eksctl.io/usage/creating-and-managing-clusters/ +eksctl create cluster -f eks/eks-cluster.yaml + +# (Optional) Inspect the config that kustomize generates +kubectl kustomize eks + +# Apply the kustomization directory to the cluster +kubectl apply -k eks +``` + +## Kubernetes dashboard + +To open the dashboard run `kubectl proxy` and navigate to: + +http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#!/login + +```sh +# Get a login token with +kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}') +``` + diff --git a/docs/dev-logs/2021-12-eks-datacoves-beta-setup.txt b/docs/dev-logs/2021-12-eks-datacoves-beta-setup.txt new file mode 100644 index 00000000..feeca140 --- /dev/null +++ b/docs/dev-logs/2021-12-eks-datacoves-beta-setup.txt @@ -0,0 +1,139 @@ +Created the datacoves-beta cluster + + $ cd config/beta.datacoves.com + $ cd eks; eksctl create cluster -f eks-cluster.yaml`; cd .. + +Added cluster admins: https://docs.aws.amazon.com/eks/latest/userguide/add-user-role.html + + $ kc edit -n kube-system configmap aws-auth + + mapUsers: | + + - userarn: arn:aws:iam::XXXXXXXXXXXX:user/ssassi + + username: ssassi + + groups: + + - system:masters + + - userarn: arn:aws:iam::XXXXXXXXXXXX:user/spelufo + + username: spelufo + + groups: + + - system:masters + + mapAccounts: | + + - XXXXXXXXXXXX + +Enabled IRSA abd OIDC: https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html +Required by cluster-autoscaler. 
+ + $ eksctl utils associate-iam-oidc-provider --cluster datacoves-beta --approve + +Setup cluster autoscaler: https://docs.aws.amazon.com/eks/latest/userguide/autoscaling.html + + Created the IAM policy for the autoscaler (wasn't created by eksctl): + + $ aws iam create-policy --policy-name AmazonEKSClusterAutoscalerPolicy --policy-document file://eks/cluster-autoscaler-policy.json + + Created the IAM role for the autoscaler and attach the policy: + + $ eksctl create iamserviceaccount \ + --cluster=datacoves-beta \ + --namespace=kube-system \ + --name=cluster-autoscaler \ + --attach-policy-arn=arn:aws:iam::XXXXXXXXXXXX:policy/AmazonEKSClusterAutoscalerPolicy \ + --override-existing-serviceaccounts \ + --approve + + Downloaded the autoscaler deployment yaml into base and made the necessary + modifications. The docs say the modifications that need to be done, but they + tell you to do them directly using kubectl. Didn't run those commands. Made + those changes to the yaml instead, and applied them with kustomize, so we can + track and update them if needed. The name of the role for the service account + annotation was hard to find. The `eksctl create iamserviceaccount` command + above creates it but it doesn't print the name. Had to go into the + cloudformation stack to find it. + + $ kc apply -k base + + Check that there are no auth errors in the logs + + $ kc -n kube-system logs -f deployment.apps/cluster-autoscaler + +Installed metrics-server, cert-manager and ingress-nginx by adding their yamls to base. + + $ kc apply -k base + + Check metrics-server works (can take some time): + + $ kc top pods + + Check ingress-nginx is running, then get the load balancer url (it ends with .elb..amazonaws.com): + + $ kc get -A svc | grep LoadBalancer + +Added a wildcard DNS record for *.beta.datacoves.com pointing to the ELB url above. + +Created an aurora postgres database cluster from AWS console. + + Chose Private. + Chose the eks cluster's VPC. + Chose the eks cluster's security group with ClusterSharedNodeSecurityGroup in the name. + + Check the connection from a pod in the cluster: + + $ kc run -it shell --image=python:3.9 -- bash + + # apt-get update; apt-get install postgresql-client + + # psql -U postgres -h DB_HOST_FROM_AWS_CONSOLE -W + + Create users and databases: + + Open an ssh tunnel to the RDS database through a cluster node, so we can + connect through it from outside the VPC. It will forward a connection to + your machine's localhost:63333 to the database. + + $ ssh -i config/beta.datacoves.com/keys/datacoves_beta_node_access_rsa -L 63333:DB_HOST:5432 ec2-user@CLUSTER_NODE + + Run the script to create the dbs. Note the generated passwords and save them + to the relevant config files under git secret. + + $ ./cli.py create_dbs + Please enter host: localhost + Please enter port (default: 5432): 63333 + Please enter master username: postgres + Please enter master password: + Creating database: datacoves, user: datacovesapi, pass: ************ + Creating database: airbyte, user: airbyte, pass: ************ + Creating database: superset, user: superset, pass: ************ + Creating database: airflow, user: airflow, pass: ************ + +Created s3 buckets from AWS console. + + Created a bucket for airbyte logs, named datacoves-beta-airbyte-logs. + Options: Disable public access, encrypted. + + Created a policy with permissions to access the bucket. 
+ + $ aws iam create-policy --policy-name datacoves-beta-airbyte-logs --policy-document file://eks/airbyte-logs-policy.json + + Created a AWS IAM user with programmatic access, named datacoves-beta-airbyte-logs. + While creating, grant the policy. + + Configure the airbyte env files with the bucket information and the user's keys. + +Deploy core-api + + Created a new application in auth0 and put the credentials in core-api.env and + pomerium-config.secret.yaml + + $ ./cli.py setup_core datacoves-beta beta.datacoves.com + + $ kcc exec -it $(kubectl -n core get pods -l app=core-api -o name) -- bash + + # ./manage.py migrate + # ./manage.py loaddata */fixtures/* + +Deploy operator and check logs + + $ ./cli.py setup_core datacoves-beta beta.datacoves.com + $ kco logs -l control-plane=controller-manager -c manager -f + +Setup balboa project + + $ ./cli.py setup_project datacoves-beta beta.datacoves.com balboa diff --git a/docs/dev-logs/2021-12-jnj-ensembledev-deployment.md b/docs/dev-logs/2021-12-jnj-ensembledev-deployment.md new file mode 100644 index 00000000..736aef95 --- /dev/null +++ b/docs/dev-logs/2021-12-jnj-ensembledev-deployment.md @@ -0,0 +1,132 @@ +# Datacoves deployment + +This document describes the deployment of the components of a datacoves system +to a JnJ EKS kubernetes cluster. + +## Prerequisites + +[This confluence page](https://confluence.jnj.com/display/AHRX/How+to+Deploy+a+new+Datacoves+cluster+-+Datacoves+-+How+to+guides) +should be followed prior to the steps outlined here to deploy datacoves. It +should document how to setup an EKS cluster with the necessary prerequisites, +and how to create and configure the required AWS services used. + +We assume here that there is a EKS cluster running with certain services already +deployed on it. The cluster is setup through CI from the git repo at +https://sourcecode.jnj.com/projects/ITX-AZT/repos/ensemble. +We require the following systems running in the cluster: + +* ingress-nginx as an ingress controller. +* cert-manager to issue SSL certificates. +* external-dns to create DNS rules from annotations. +* A system that creates a new kubernetes secret with a known name with + credentials to pull docker images in every namespace of the cluster. + +The machine from where the deployments scripts will be run must have python3 and +git installed, as well as kubectl (client) version 1.21 or higher, configured +to access the cluster with broad permissions. + +We also assume the docker registry / repository that you configure to pull +images has all the docker images required. Datacoves will build and push this +images. The list of images used by a cluster, computed from this repo's +configuration, can be displayed with `./cli.py images ensembledev.apps.jnj.com`, +or in general `./cli.py images CLUSTER_DOMAIN`. + + +## Initial setup and configuration + +Clone the datacoves_deployment git repository and change directory to it. + +```sh +git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git +cd datacoves_deployment +``` + +Configuration is stored in the repo, encrypted using git-secret. You will need +to be in the repo's git secret keyring to decrypt them. Ask someone already in +the keyring for access (e.g. spelufo@its.jnj.com). + +Decrypt the configuration secrets. The `-f` flag will overwrite existing files. + +``` +git secret reveal -f +``` + +The `config` directory holds configuration files. Each subdirectory holds +configuration for a kubernetes cluster and must be named after the cluster +domain name. 
For example, the configuration for the current (2021) version of +datacoves is in `config/ensembledev.apps.jnj.com`. + +If deploying to a new cluster, create a new directory under config based on +`config/ensembledev.apps.jnj.com`. You will need to use `git secret add` and +`git secret hide` to add your new secrets to the repo and encrypt them before +commiting them. + + +## Deploying datacoves core web application + +First, make sure your kubectl context is appropiate for the cluster. + +```sh +CLUSTER_DOMAIN=ensembledev.apps.jnj.com +KCTX=$(kubectl config current-context) + +# Deploy the datacoves core api server to the core namespace. +./cli.py setup_core "$KCTX" "$CLUSTER_DOMAIN" +``` + +Enter an api server pod and run database migrations: + +```sh +kubectl -n core exec -it $(kubectl -n core get pods -l app=core-api -o name) -- bash + +# From inside the pod: +./manage.py migrate +./manage.py loaddata */fixtures/* +``` + +Check the server is running: +``` +$ kubectl -n core get pods +NAME READY STATUS RESTARTS AGE +core-api-deployment-5f8f64cf69-6rvhd 1/1 Running 0 3d19h +``` + + +## Deploying datacoves project operator + +The datacoves project operator manages two [CRDs](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/): +datacoves.com/Project and datacoves.com/User. To deploy the operator, run: + +```sh +./cli.py setup_operator "$KCTX" "$CLUSTER_DOMAIN" +``` + +To check the operator is running, and/or see its logs: + +``` +$ kubectl -n operator-system get pods +NAME READY STATUS RESTARTS AGE +operator-controller-manager-78cc7cfb6-9ddkw 2/2 Running 0 47h + +$ kubectl -n operator-system logs -l control-plane=controller-manager -c manager -f +``` + + +## Deploying a datacoves project namespace + +Every project is deployed to a namespace named `dcp-{project_name}`. The +setup_project script creates a new namespace and project kubernetes object from +the configuration file in `config/{cluster_domain}/projects/{project_name}.yaml`. +The operator will detect changes to this object and create deployments and other +resources for the project. + +```sh +PROJECT_NAME=emeadev +./cli.py setup_project "$KCTX" "$CLUSTER_DOMAIN" "$PROJECT_NAME" +``` + +To watch for pod status changes as the operator create's the project resources: + +```sh +kubectl -n "dcp-$PROJECT_NAME" get pods --watch +``` diff --git a/docs/dev-logs/2022-04-jnj-artemisdev-configuration.md b/docs/dev-logs/2022-04-jnj-artemisdev-configuration.md new file mode 100644 index 00000000..d66010df --- /dev/null +++ b/docs/dev-logs/2022-04-jnj-artemisdev-configuration.md @@ -0,0 +1,28 @@ +## Configuring datacoves + +Requirements: Access to a datacoves configuration git repo and being in it's git secret keyring. + +First pull the latest changes and reveal the git secrets. + +```bash +git checkout main +git pull +git secret reveal -f +``` + +I've marked with `TODO` the values that need to be filled in: + +- Airflow DB connection in: `environments/dev123/airflow.secret.yaml` +- Airflow EFS volume_handle (fs id) in: `environments/dev123/airflow.secret.yaml` +- Datacoves api DB host (`DB_HOST`) and password (`DB_PASS`) in `secrets/core-api.env` +- PING_CLIENT_ID and PING_CLIENT_SECRET in `secrets/core-api.env` + +After editing those files to add the required values commit the changes with: + +```bash +git secret hide +git diff # Review your changes, all sensitive data should be encrypted. +git add . +git commit -m 'Updated secrets.' 
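+# Optional extra check before pushing (not part of the documented flow):
+# list the files tracked by git-secret to confirm the sensitive ones are covered.
+git secret list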
+git push +``` diff --git a/docs/dev-logs/2022-04-jnj-ensembletest-deployment.md b/docs/dev-logs/2022-04-jnj-ensembletest-deployment.md new file mode 100644 index 00000000..09397ae9 --- /dev/null +++ b/docs/dev-logs/2022-04-jnj-ensembletest-deployment.md @@ -0,0 +1,31 @@ +## Datacoves deployment + +This repository contains the datacoves installation scripts. They install datacoves to an existing EKS cluster, based on the configuration files in the `config` directory. Configuration for each cluster is kept in a separate repository. They are mounted as git submodules under `config/{cluster_domain}`. + +Prior to this, the EKS cluster and other required AWS resources must be created. The clusters are created through CloudX pipelines, from `cluster.yaml` files in other repositories like [itx-ank/ensemble](https://sourcecode.jnj.com/scm/itx-ank/ensemble). Additional AWS resources are created using terraform from the [iac](https://sourcecode.jnj.com/projects/ITX-AZT/repos/iac) repository. + +Once these prerequisites are done, and the configuration repository for the cluster has been updated accordingly, the installation is as follows. + + +```bash +# Set these as needed for your cluster. +cluster_domain=ensembletest.apps.jnj.com +kubectl_context=itx-ank-ensemble-test + +# Clone this repository into the installation workstation. +git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git +cd datacoves_deployment +git submodule update --init + +# Reveal the secrets in the config submodule directory. +(cd config/$cluster_domain; git secret reveal -f) + +# Install python dependencies for the installation scripts. +pip3 install --user -r requirements.txt + +# Install datacoves base dependencies into the cluster (ingress-nginx, etc.) +./cli.py setup_base $kubectl_context $cluster_domain + +# Install datacoves. +./cli.py install $kubectl_context $cluster_domain +``` diff --git a/docs/dev-logs/2022-05-setup-aks-postgres-flexible-server.md b/docs/dev-logs/2022-05-setup-aks-postgres-flexible-server.md new file mode 100644 index 00000000..6fc8d5f0 --- /dev/null +++ b/docs/dev-logs/2022-05-setup-aks-postgres-flexible-server.md @@ -0,0 +1,44 @@ +## Set up postgres flexible server on Azure + +1. Find it [here](https://portal.azure.com/#@datacoves.com/resource/subscriptions/91bd2205-0d74-42c9-86ad-41cca1b4822b/resourceGroups/datacoves/providers/Microsoft.DBforPostgreSQL/flexibleServers/datacoves-east-us/overview) +2. Connect to it using this command: + +``` +psql -h datacoves-east-us.postgres.database.azure.com -U dcmaster -d postgres +``` + +3. Create the `datacoves` user that will be used by Django: + +``` +CREATE USER datacoves password ''; +ALTER USER datacoves CREATEDB CREATEROLE; +GRANT datacoves TO dcmaster; +CREATE DATABASE datacoves OWNER datacoves; +GRANT CONNECT ON DATABASE datacoves TO datacoves; +``` + +4. Dump data from internal Database + +``` +pg_dump -U postgres -h postgres-svc -d datacoves -Fc > dump.sql +``` + +5. Restore data on new Azure DB + +``` +pg_restore -U datacoves -h datacoves-east-us.postgres.database.azure.com -d datacoves --no-owner --role=datacoves dump.sql +``` + +6. Repeate steps 4 and 5 with the rest of the services that need to be migrated + +Keep in mind that database objects owner could be changed, reassign the owner to the corresponding service account, i.e.: + +``` +REASSIGN OWNED BY datacoves TO dev123_airbyte; +``` + +If migrating `temporal` and `temporal_visibility` databases, you also need to update the database name on `schema_versions`. + +7. 
Set `airbyte_db_external: true`, `airflow_db_external: true` and `superset_db_external: true` accordingly + +8. Configure `postgres_db_provisioner` using the master user connection/credentials diff --git a/docs/dev-logs/README.md b/docs/dev-logs/README.md new file mode 100644 index 00000000..e69de29b diff --git a/docs/dev-logs/img/core-erd.png b/docs/dev-logs/img/core-erd.png new file mode 100644 index 00000000..f2a3a77c Binary files /dev/null and b/docs/dev-logs/img/core-erd.png differ diff --git a/docs/docker-compose.yaml b/docs/docker-compose.yaml new file mode 100644 index 00000000..b5728d14 --- /dev/null +++ b/docs/docker-compose.yaml @@ -0,0 +1,11 @@ +--- +version: "3.7" + +services: + devdocs: + build: + context: ./ + dockerfile: Dockerfile + init: true + ports: + - "8080:8080" diff --git a/docs/docs_output/assets/cove.jpeg b/docs/docs_output/assets/cove.jpeg new file mode 100644 index 00000000..24c8fb4f Binary files /dev/null and b/docs/docs_output/assets/cove.jpeg differ diff --git a/docs/docs_output/assets/custom.js b/docs/docs_output/assets/custom.js new file mode 100644 index 00000000..009f2b7d --- /dev/null +++ b/docs/docs_output/assets/custom.js @@ -0,0 +1,158 @@ +/* + * Custom JS items for the site + */ + +jQuery(document).ready(function() { + // Slide reveal for sidebar. + jQuery('div.sidebar-toggle-button').click(function(ev) { + jQuery('body').toggleClass('close'); + }); + + // Fix styles on sidebar -- everything gets 'file' by default + jQuery('div.sidebar-nav ul').find('li').addClass('file'); + + // But for li's containing ul's, that li gets 'folder' instead. + jQuery('div.sidebar-nav ul').find('li > ul') + .parent() + .removeClass('file') + .addClass('folder'); + + // Fix duplicate footer + jQuery('footer').not('footer.keeper').remove(); + + /* + * Handle the sidebar 'ladder' correctly, popping out the items we + * wish to see. + * + * Find the selected file by its href. + */ + var selected_file = window.location.pathname.replaceAll('"', '\\"'); + + // We want its parent li + var selected_link = jQuery('div.sidebar-nav ul') + .find('a[href="' + selected_file + '"]') + .parent(); + + // If we have something, add the proper classes to it. + if (selected_link.length) { + selected_link.addClass('active'); + + if (selected_link.hasClass('folder')) { + selected_link.addClass('open'); + } + + selected_link.parents('li').addClass('open'); + } + + // Update title + var potential_titles = jQuery('li.active a'); + + if (potential_titles.length) { + jQuery('head title').text( + 'Datacoves Docs - ' + potential_titles.text() + ); + } + + // Create a map of URLs to titles + var titleMap = {}; + + jQuery('div.sidebar-nav').find('a').each(function() { + var jqThis = jQuery(this); + titleMap[jqThis.attr('href')] = jqThis.text(); + }); + + // Clear search + jQuery('#searchbox').val(''); + + // Set up search + jQuery.ajax( + '/assets/search-index.js', + { + dataType: 'text', + method: 'GET', + success: function (data) { + var index = MiniSearch.loadJSON(data, {fields: ['text']}); + + jQuery('#searchbox').on('input', function (ev) { + var results = index.search( + jQuery(this).val(), + { + prefix: true, + fuzzy: 0.3, + } + ); + + // Clear results div + var resultsDiv = jQuery('#resultsbox'); + resultsDiv.empty(); + + // Take the first 10 results + for (var i = 0; i < results.length && (i < 10); i++) { + var newDiv = jQuery('
'); + var newA = jQuery(''); + var newH2 = jQuery('

'); + var newP = jQuery('

'); + + if (titleMap[results[i].url]) { + newH2.html(titleMap[results[i].url]); + } else { + newH2.text("Datacoves Page"); + } + + // Add a snip + newP.html( + results[i].snip.substr( + results[i].snip.toLowerCase().indexOf( + results[i].queryTerms[0].toLowerCase() + ), + 50 + ) + + + "..." + ); + + newA.attr('href', results[i].url); + + newA.append(newH2); + newA.append(newP); + newDiv.append(newA); + resultsDiv.append(newDiv); + } + + resultsDiv.addClass('show'); + jQuery('div.search').addClass('show'); + jQuery('button.clear-button').addClass('show'); + }); + } + } + ); + + // Handle clear button + jQuery('button.clear-button').click(function (ev) { + jQuery('#resultsbox').empty().removeClass('show'); + jQuery('div.search').removeClass('show'); + jQuery('button.clear-button').removeClass('show'); + jQuery('#searchbox').val('');; + }); +}); + +// Add edit on github support +function editOnGitHub() +{ + // What's our github URL? + var url = 'https://github.com/datacoves/docs/edit/main/docs'; + + url = url + window.location.pathname; + + // Is it a readme file? + if (!window.location.pathname.endsWith('.html')) { + url = url + 'README.md'; + } else { + // replace .html with .md + url = url.replace(/\.html$/, '.md') + } + + window.open(url, '_blank'); + + return false; +} diff --git a/docs/docs_output/assets/dc-logo.svg b/docs/docs_output/assets/dc-logo.svg new file mode 100644 index 00000000..cb18de30 --- /dev/null +++ b/docs/docs_output/assets/dc-logo.svg @@ -0,0 +1,11 @@ + + + + + + + + + + + diff --git a/docs/docs_output/assets/favicon.ico b/docs/docs_output/assets/favicon.ico new file mode 100644 index 00000000..60b5476a Binary files /dev/null and b/docs/docs_output/assets/favicon.ico differ diff --git a/docs/docs_output/assets/pygment.css b/docs/docs_output/assets/pygment.css new file mode 100644 index 00000000..c4b2fd9c --- /dev/null +++ b/docs/docs_output/assets/pygment.css @@ -0,0 +1,75 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.codehilite .hll { background-color: #ffffcc } +.codehilite { background: #f8f8f8; } +.codehilite .c { color: #3D7B7B; font-style: italic } /* Comment */ +.codehilite .err { border: 1px solid #FF0000 } /* Error */ +.codehilite .k { color: #008000; font-weight: bold } /* Keyword */ +.codehilite .o { color: #666666 } /* Operator */ +.codehilite .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */ +.codehilite .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */ +.codehilite .cp { color: #9C6500 } /* Comment.Preproc */ +.codehilite .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */ +.codehilite .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */ +.codehilite .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */ +.codehilite .gd { color: #A00000 } /* Generic.Deleted */ +.codehilite .ge { font-style: italic } /* Generic.Emph */ +.codehilite .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */ +.codehilite .gr { color: #E40000 } /* Generic.Error */ +.codehilite .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.codehilite .gi { color: #008400 } /* 
Generic.Inserted */ +.codehilite .go { color: #717171 } /* Generic.Output */ +.codehilite .gp { color: #000080; font-weight: bold } /* Generic.Prompt */ +.codehilite .gs { font-weight: bold } /* Generic.Strong */ +.codehilite .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.codehilite .gt { color: #0044DD } /* Generic.Traceback */ +.codehilite .kc { color: #008000; font-weight: bold } /* Keyword.Constant */ +.codehilite .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */ +.codehilite .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */ +.codehilite .kp { color: #008000 } /* Keyword.Pseudo */ +.codehilite .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */ +.codehilite .kt { color: #B00040 } /* Keyword.Type */ +.codehilite .m { color: #666666 } /* Literal.Number */ +.codehilite .s { color: #BA2121 } /* Literal.String */ +.codehilite .na { color: #687822 } /* Name.Attribute */ +.codehilite .nb { color: #008000 } /* Name.Builtin */ +.codehilite .nc { color: #0000FF; font-weight: bold } /* Name.Class */ +.codehilite .no { color: #880000 } /* Name.Constant */ +.codehilite .nd { color: #AA22FF } /* Name.Decorator */ +.codehilite .ni { color: #717171; font-weight: bold } /* Name.Entity */ +.codehilite .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */ +.codehilite .nf { color: #0000FF } /* Name.Function */ +.codehilite .nl { color: #767600 } /* Name.Label */ +.codehilite .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */ +.codehilite .nt { color: #008000; font-weight: bold } /* Name.Tag */ +.codehilite .nv { color: #19177C } /* Name.Variable */ +.codehilite .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */ +.codehilite .w { color: #bbbbbb } /* Text.Whitespace */ +.codehilite .mb { color: #666666 } /* Literal.Number.Bin */ +.codehilite .mf { color: #666666 } /* Literal.Number.Float */ +.codehilite .mh { color: #666666 } /* Literal.Number.Hex */ +.codehilite .mi { color: #666666 } /* Literal.Number.Integer */ +.codehilite .mo { color: #666666 } /* Literal.Number.Oct */ +.codehilite .sa { color: #BA2121 } /* Literal.String.Affix */ +.codehilite .sb { color: #BA2121 } /* Literal.String.Backtick */ +.codehilite .sc { color: #BA2121 } /* Literal.String.Char */ +.codehilite .dl { color: #BA2121 } /* Literal.String.Delimiter */ +.codehilite .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */ +.codehilite .s2 { color: #BA2121 } /* Literal.String.Double */ +.codehilite .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */ +.codehilite .sh { color: #BA2121 } /* Literal.String.Heredoc */ +.codehilite .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */ +.codehilite .sx { color: #008000 } /* Literal.String.Other */ +.codehilite .sr { color: #A45A77 } /* Literal.String.Regex */ +.codehilite .s1 { color: #BA2121 } /* Literal.String.Single */ +.codehilite .ss { color: #19177C } /* Literal.String.Symbol */ +.codehilite .bp { color: #008000 } /* Name.Builtin.Pseudo */ +.codehilite .fm { color: #0000FF } /* Name.Function.Magic */ +.codehilite .vc { color: #19177C } /* Name.Variable.Class */ +.codehilite .vg { color: #19177C } /* Name.Variable.Global */ +.codehilite .vi { color: #19177C } /* Name.Variable.Instance */ +.codehilite .vm { color: #19177C } /* Name.Variable.Magic */ +.codehilite .il { color: #666666 } /* Literal.Number.Integer.Long */ diff --git a/docs/docs_output/assets/scroll-down.png b/docs/docs_output/assets/scroll-down.png new file mode 100644 index 
00000000..de804bd2 Binary files /dev/null and b/docs/docs_output/assets/scroll-down.png differ diff --git a/docs/docs_output/client-docs/ccs/cluster-requirements-azure.html b/docs/docs_output/client-docs/ccs/cluster-requirements-azure.html new file mode 100644 index 00000000..fbc63a9d --- /dev/null +++ b/docs/docs_output/client-docs/ccs/cluster-requirements-azure.html @@ -0,0 +1,908 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Summary for the requirements of a new Cluster. +

+ + +

+ Database (Azure Database for PostgreSQL - Flexible Server) +

+ + +

+ Minimum requirements +

+
    +
  • + Version: 14 or later +
  • +
  • + Workload Type: Production +
  • +
  • + Compute+Storage: General Purpose, D4ds_v5 +
  • +
  • + Geo-Redundancy and High Availability optional but recommended. +
  • +
  • + Admin user/password required and must be provided to Datacoves. +
  • +
  • + Storage Type: Premium SSD +
  • +
  • + Storage Size: 128 GiB +
  • +
  • + Performance Tier: P10 +
  • +
  • + Storage auto growth enabled optional but recommended. +
  • +
+ + +

+ Kubernetes Services +

+ + +

+ Configuration +

+
    +
  • + Kubernetes version: 1.30.6 or later +
  • +
+ + +

+ Node pools +

+
    +
  • + general +
  • +
  • + volumed +
  • +
  • + workers - Standard_D4s_v3 node, 128 gig OS disk size +
  • +
+ + +

+ Worker groups +

+
    +
  • + General +
  • +
  • + Volumed +
  • +
  • + Workers +
  • +
+ + +

+ General +

+
    +
  • + Standard_D4s_v3 +
  • +
  • + min_nodes: 1 +
  • +
  • + max_nodes: 4 +
  • +
  • + root_volume_size: 128 +
  • +
  • + labels: +
  • +
+
labels:
+    ...
+    - key: k8s.datacoves.com/nodegroup-kind
+    value: general
+
+ + +

+ Volumed +

+
    +
  • + Standard_D16s_v5 +
  • +
  • + min_nodes: 1 +
  • +
  • + max_nodes: 4 +
  • +
  • + root_volume_size: 512 +
  • +
  • + labels: +
  • +
+
labels:
+    ...
+    - key: k8s.datacoves.com/nodegroup-kind
+    value: volumed
+
+ + +

+ Workers +

+
    +
  • + min_nodes: 1 +
  • +
  • + max_nodes: 4 +
  • +
  • + root_volume_size: 128 +
  • +
  • + labels: +
  • +
+
labels:
+    ...
+    - key: k8s.datacoves.com/workers
+    value: enabled
+
+ + +

+ Other configuration. +

+ + +

+ SSL Certificate +

+

+ We recommend using a wildcard certificate; however, we can also use cert manager for free certificates if that is the preference. +

+

+ Certificates must be issued for: +

+
    +
  • + + *.domain.com + +
  • +
  • + + domain.com + +
  • +
+

+ Where 'domain.com' is whatever base domain you wish to use. We recommend using "datacoves.YOUR_DOMAIN.YOUR_TLD", such as 'datacoves.mycompany.com'. In such a case, you would need certificates for: +

+
    +
  • + + *.datacoves.mycompany.com + +
  • +
  • + + datacoves.mycompany.com + +
  • +
+ + +

+ DNS Configuration +

+

+ Either DNS must be configured to support the same wildcard and base domain, or the cluster must be allowed to create DNS entries via kubernetes' external-dns annotation. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/index.html b/docs/docs_output/client-docs/index.html new file mode 100644 index 00000000..9087a2ad --- /dev/null +++ b/docs/docs_output/client-docs/index.html @@ -0,0 +1,807 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Customer clusters +

+ + +

+ jnj +

+
    +
  1. + Artemis +
  2. +
  3. + Artemis dev +
  4. +
  5. + Ensemble +
  6. +
  7. + Ensemble test +
  8. +
  9. + RND +
  10. +
+ + +

+ Requirements: +

+
    +
  • + Access to jnj workspace (workspace is provided by jnj and is personal) +
      +
    • + Request access: Onboarding (Noel) +
    • +
    • + Check access: https://jnjitod.service-now.com/myworkspaces +
    • +
    +
  • +
  • + Repo with all configurations https://sourcecode.jnj.com/projects/asx-ahrx/repos/datacoves_deployment/browse (README with all configurations). There will be a specific repo for each cluster (onboarding Noel). +
      +
    • + Request access: https://confluence.jnj.com/pages/viewpage.action?spaceKey=AHRX&title=How+to+request+access+to+Bitbucket+-+How+to+request+access+-+How+to+guides +
    • +
    +
  • +
  • + Access to Bastion +
  • +
+ + +

+ kenvue +

+
    +
  1. + Chap dev +
  2. +
  3. + Chap production +
  4. +
+ + +

+ Requirements: +

+
    +
  • + Access to kenvue microsoft remote desktop (provided by jnj and is personal) +
      +
    • + Request access: Onboarding (Noel) +
    • +
    • + Check access: https://kenvue.sharepoint.com/ +
    • +
    +
  • +
  • + Repo is the same as jnj +
  • +
  • + Access to Bastion +
  • +
+ + +

+ orrum +

+
    +
  1. + old +
  2. +
  3. + new +
  4. +
+ + +

+ Requirements: +

+
    +
  • + Download VPN from Azure (see client-docs instructions) +
  • +
  • + Credentials in 1 password +
  • +
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/jnj/1-cluster-requirements.html b/docs/docs_output/client-docs/jnj/1-cluster-requirements.html new file mode 100644 index 00000000..a2479f23 --- /dev/null +++ b/docs/docs_output/client-docs/jnj/1-cluster-requirements.html @@ -0,0 +1,1416 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Datacoves cluster requirements +

+

+ + Summary for the requirements of a new Cluster. + +

+ + +

+ EKS cluster +

+

+ The clusters are created through CloudX pipelines, from + + cluster.yaml + + files ( + + docs + + ). +For every cluster there's a git repository with the cluster definition. If your +team creates one of these repositories, please either grant access to datacoves staff so +we can make changes if required or ask us to check your + + cluster.yaml + + . +

+

+ An example repository of this kind is + + itx-ank/ensemble + + . +

+

+ Important configuration to take into consideration: +

+
  • Kubernetes version: latest confirmed working version. This is either -1 or -2 releases from current based on the time of year.
  • Addons versions
  • Worker groups: general, volumed, and workers.
+ + +

+ Cluster configuration files +

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ Cluster + + Repository + + Branch +
+ Ensemble test + + https://sourcecode.jnj.com/projects/ITX-ANK/repos/ensemble/browse/_scm_cluster + + test +
+ Ensemble + + https://sourcecode.jnj.com/projects/ITX-CCC/repos/ensemble/browse/_scm_cluster + + production +
+ R&D + + https://sourcecode.jnj.com/projects/ITX-BHE/repos/integrationscluster/browse/_scm_cluster + + test +
+ Artemis Dev + + https://sourcecode.jnj.com/projects/ITX-ADW/repos/artemiseks/browse/_scm_cluster + + development +
+ Artemis + + https://sourcecode.jnj.com/projects/ITX-ADW/repos/artemiseks/browse/_scm_cluster + + production +
+ Chap development + + https://sourcecode.jnj.com/projects/ITX-WCR/repos/datacove/browse/_scm_cluster + + development +
+ Chap production + + https://sourcecode.jnj.com/projects/ITX-WCR/repos/datacove/browse/_scm_cluster + + production +
+

Once the cluster has been provisioned, you'll receive an e-mail containing the details to configure kubectl. Please forward it to the Datacoves team.

+

+ The installer will need kubectl access to the cluster + + docs + + . +

+ + +

+ Opt out from EFS CSI driver +

+

+ The EFS CSI driver installed by cloudx is usually outdated (v1.0.0) so we need to opt out from the cloudx managed service. +

+

+ To opt out from EFS CSI managed driver, create a pull request on this repo, similar to this + + one + + . +

+ + +

+ External DNS +

+

+ In the cluster.yaml configuration there is a key + + external_dns + + . This key deploys the service + + External DNS + + to the cluster, managed by CloudX. +This service might not be available in some clusters yet, so a manual configuration might be needed on Route53 or any other DNS service, typically a CNAME record pointing to the cluster's load balancer hostname. +

+ + +

+ Getting load balancer's hostname +

+
kubectl -n ingress-nginx get svc ingress-nginx-controller -o=jsonpath='{.status.loadBalancer.ingress[0].hostname}'
+
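If external-dns is not available, the CNAME can be created manually; for example on Route53 (hosted zone id and hostnames are placeholders, and your DNS service may differ):

aws route53 change-resource-record-sets \
  --hosted-zone-id Z0123456789EXAMPLE \
  --change-batch '{
    "Changes": [{
      "Action": "UPSERT",
      "ResourceRecordSet": {
        "Name": "*.datacoves.mycompany.com",
        "Type": "CNAME",
        "TTL": 300,
        "ResourceRecords": [{"Value": "<load-balancer-hostname>"}]
      }
    }]
  }'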
+ + +

+ SSL Certificates Manager +

+

+ CloudX will install + + Cert Manager + + if the cluster supports it. +

+

If Cert Manager is not installed, 2 SSL certificates need to be issued manually:
  • wildcard certificate: *.[SUBDOMAIN].[DOMAIN]
  • root certificate: [SUBDOMAIN].[DOMAIN]

+

A certificate chain file and a private key are required for each certificate; please send the 4 files to Datacoves staff.
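If your certificate authority asks for CSRs, a minimal openssl sketch is shown below (domains are illustrative; follow your CA's actual process):

# CSR and key for the wildcard certificate
openssl req -new -newkey rsa:2048 -nodes \
  -keyout wildcard.datacoves.mycompany.com.key \
  -out wildcard.datacoves.mycompany.com.csr \
  -subj "/CN=*.datacoves.mycompany.com"

# CSR and key for the root (base domain) certificate
openssl req -new -newkey rsa:2048 -nodes \
  -keyout datacoves.mycompany.com.key \
  -out datacoves.mycompany.com.csr \
  -subj "/CN=datacoves.mycompany.com"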

+ + +

+ Git repositories +

+ + +

+ Config repo +

+

+ Each datacoves installation requires a configuration repo where Datacoves staff will store configuration details. +

+

+ Please create one repo per kubernetes cluster and grant access to Datacoves staff. +

+ + +

+ Dbt analytics repo +

+

+ This is the repo where your analytics (dbt) project resides, along with airflow dags, db security roles, documentation, etc. +

+ + +

+ Git Service Account +

+

+ Please create a Service Account with read access to the analytics repo, since that service account will be configured on services like Airflow and dbt-docs to read files from the repo. +

+


+

This account will also be used by Jenkins to download images from artifactory (taqy-docker namespace), so please request access to taqy-docker on that account via AppDevTools.

+ + +

+ Database +

+

+ Some services require Postgres databases, as described below. These databases can share an RDS instance or aurora cluster. You will need to create this database cluster/instance and ensure it can be accessed from the EKS cluster. +

+ + +

+ Minimum requirements +

+
  • Engine: Postgres
  • Version: 14.9
  • Multi-AZ: "Single DB Instance" for sandbox clusters, "Multi-AZ DB Cluster" if not.
  • Master user: postgres
  • Master password:
  • Instance class: db.r5.large
  • Storage type: Aurora Standard or gp2
  • Allocated_storage: 100GB
  • Enable storage autoscaling
  • Maximum storage threshold: 1TB
  • Authentication: password
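As a rough sketch, an RDS instance matching these requirements could be created with the AWS CLI as follows (identifiers, subnet group and security group are placeholders; many teams provision this through their own pipelines instead):

aws rds create-db-instance \
  --db-instance-identifier datacoves-ensemble-pro001 \
  --engine postgres \
  --engine-version 14.9 \
  --db-instance-class db.r5.large \
  --master-username postgres \
  --master-user-password '<master-password>' \
  --allocated-storage 100 \
  --max-allocated-storage 1000 \
  --storage-type gp2 \
  --db-subnet-group-name <subnet-group> \
  --vpc-security-group-ids <sg-id>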

+ Keep in mind that JNJ cycles the master password every 24 hours so you need to run any setup command using this password before that happens. +

+ + +

+ Initial database and user +

+

+ You'll need to create a master Postgres user and the datacoves database: +

+
CREATE USER datacoves PASSWORD 'insert_generated_random_password_without_special_characters';
+ALTER USER datacoves CREATEDB CREATEROLE;
+GRANT datacoves TO postgres;
+CREATE DATABASE datacoves OWNER datacoves;
+REVOKE connect ON DATABASE datacoves FROM PUBLIC;
+GRANT connect ON DATABASE datacoves TO datacoves;
+GRANT connect ON DATABASE datacoves TO postgres;
+
+

+ A way to generate passwords: + + python -c 'import secrets; print(secrets.token_urlsafe())' + + . +Avoid special characters, they cause issues with some services, such as airflow. +

+

+ Please share this password with the Datacoves team. +

+ + +

+ Active Directory groups +

+

+ Roles/groups required for datacoves users: +

+
JNJ-APP-{division}-DATACOVES-ADMIN
+JNJ-APP-{division}-DATACOVES-DEVELOPER
+JNJ-APP-{division}-DATACOVES-VIEWER
+JNJ-APP-{division}-DATACOVES-KTLO
+
+

+ Substitute your + + {division} + + , e.g. + + PCE + + , + + HMD + + , + + CHAP + + , etc. +

+ + +

+ Ping identity account +

+

+ Submit a ticket to + + Web Single Sign-On - SAML Federation + + to create a ping account. +

+ + +

+ IRIS Request +

+ + +

+ Short Description +

+

+ This is a request to enable SSO for + + cluster. + +

+ + +

+ Description +

+

+ Need to add PingID to application. +

+ + +

+ Groups +

+

+ Need groups only filtered to ones that have the following pattern JNJ-APP- + + -DATACOVES-* + +

+ + +

+ Type +

+

+ Choose: OAuth/OpenID Connect +

+ + +

+ Client id +

+

+ It should be any name for your cluster (e.g. + + chapsbx + + , + + emea_ensemble_test + + , + + emea_artemis_dev + + , etc.). +

+ + +

+ Redirect urls +

+

+ + https://api.{cluster_domain}/complete/ping_federate + +

+ + +

+ Additional fields +

+

+ Requires interactive electronic signatures using SSO: No +Attributes: groups, openid, profile, email +

+

+ When the Iris request is fulfilled, you will receive an email with: +

+
    +
  • + Client ID (verify this is the one that was requested) +
  • +
  • + Client Secret +
  • +
  • + A list of OAuth endpoints +
  • +
+

+ Please share this information with the Datacoves team. +

+ + +

+ Airflow +

+ + +

+ EFS file system for airflow logs +

+

+ Follow the instructions to "Create EFS in AWS Account" from + + this confluence page + + . Don't follow the other sections of the page. +

+

+ As a name use datacoves-[cluster id]-[environment slug]-airflow-logs. +

+

It's important to attach the right EKS security group so the EKS cluster has access to the EFS filesystem. You can find the security group id in the EKS cluster admin page, Networking tab, under Additional security groups.
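For illustration only, the EFS filesystem and a mount target could be created with the AWS CLI along these lines (all ids are placeholders; the confluence page above remains the authoritative procedure):

aws efs create-file-system \
  --creation-token datacoves-ensemble-pro001-airflow-logs \
  --tags Key=Name,Value=datacoves-ensemble-pro001-airflow-logs

# One mount target per subnet used by the EKS nodes, attaching the EKS security group.
aws efs create-mount-target \
  --file-system-id fs-0123456789abcdef0 \
  --subnet-id subnet-0123456789abcdef0 \
  --security-groups sg-0123456789abcdef0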

+ + +

+ S3 bucket for Airflow dags +

+

Due to Bitbucket scheduled downtimes we recommend using S3 as the DAGs store to minimize disruptions.

+
  1. Create an S3 bucket per environment, i.e. datacoves-[cluster id]-[environment slug]-airflow-dags (datacoves-ensemble-pro001-airflow-dags)
  2. Create an IAM policy that grants read/write access to the new S3 bucket created; use the same naming convention used for the S3 bucket.
  3. Follow these instructions to create an IAM Role, up to "Create IAM Role For K8s Service Account", and attach the policy you created on step 2. Name the IAM role using the same convention you used for the S3 bucket.
  4. Do not associate the IAM role to a K8s Service Account; that part is managed by Datacoves.
  5. Create an IAM user for Jenkins to upload the dbt project and dags to S3. Use the same naming convention and attach the same policy you created on step 2. (A CLI sketch of these steps follows below.)
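A minimal CLI sketch of the steps above (bucket, policy, user and file names are illustrative; the policy document is the read/write policy from step 2 saved to a file):

aws s3api create-bucket --bucket datacoves-ensemble-pro001-airflow-dags
aws iam create-policy \
  --policy-name datacoves-ensemble-pro001-airflow-dags \
  --policy-document file://airflow-dags-policy.json
aws iam create-user --user-name datacoves-ensemble-pro001-airflow-dags
aws iam attach-user-policy \
  --user-name datacoves-ensemble-pro001-airflow-dags \
  --policy-arn arn:aws:iam::<account-id>:policy/datacoves-ensemble-pro001-airflow-dags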
+ + +

+ Trusted policy example: +

+
{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Principal": {
+                "Federated": "arn:aws:iam::327112934799:oidc-provider/oidc.eks.us-east-1.amazonaws.com/id/125EA29C302DF7DBB900ED84AA85F0BB"
+            },
+            "Action": "sts:AssumeRoleWithWebIdentity",
+            "Condition": {
+                "StringLike": {
+                    "oidc.eks.us-east-1.amazonaws.com/id/125EA29C302DF7DBB900ED84AA85F0BB:sub": "system:serviceaccount:dcw-dev123:dev123-airflow-*",
+                    "oidc.eks.us-east-1.amazonaws.com/id/125EA29C302DF7DBB900ED84AA85F0BB:aud": "sts.amazonaws.com"
+                }
+            }
+        }
+    ]
+}
+
+ + +

+ DBT API +

+
    +
  • + Create an S3 bucket. +
  • +
  • + Choose a bucket name, we suggest using + + _dbt_api where + + could be + + ensemble + + , + + ensembletest + + , etc. + + +
  • +
  • + Create an IAM user with a policy to access the bucket, like the one below, + replacing + + {your_bucket_name} + + with your bucket's name. +
  • +
  • + Create an access key for the user. Share it with the Datacoves team. +
  • +
+
{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:GetObjectVersion",
+        "s3:DeleteObject",
+        "s3:DeleteObjectVersion"
+      ],
+      "Resource": "arn:aws:s3:::{your_bucket_name}/*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket",
+        "s3:GetBucketLocation"
+      ],
+      "Resource": "arn:aws:s3:::{your_bucket_name}"
+    }
+  ]
+}
+
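A possible CLI sketch for creating the user, attaching the policy and generating the access key (names are illustrative; the policy document is the JSON above saved to a file):

aws iam create-user --user-name ensemble-dbt-api
aws iam put-user-policy \
  --user-name ensemble-dbt-api \
  --policy-name ensemble-dbt-api-s3 \
  --policy-document file://dbt-api-bucket-policy.json
aws iam create-access-key --user-name ensemble-dbt-api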
+ + +

+ Grafana +

+

+ Grafana requires an S3 bucket with lifecycle management enabled. +Follow + + this guide + + to configure it accordingly. +
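As an illustration of what lifecycle management can look like on that bucket (the bucket name and the 90-day expiration are placeholders; the linked guide is authoritative):

aws s3api put-bucket-lifecycle-configuration \
  --bucket ensemble-prd-observability-grafana-loki \
  --lifecycle-configuration '{
    "Rules": [{
      "ID": "expire-old-objects",
      "Status": "Enabled",
      "Filter": {"Prefix": ""},
      "Expiration": {"Days": 90}
    }]
  }'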

+ + +

+ Airbyte +

+
    +
  • + S3 bucket for airbyte logs, an IAM user with a policy to access it, and an + access key for the user. +
  • +
+ + +

+ S3 bucket for airbyte logs +

+
    +
  • + Create an S3 bucket. +
  • +
  • + Create an IAM user with a policy to access the bucket, like the one below, + replacing + + {your_bucket_name} + + with your bucket's name. +
  • +
  • + Create an access key for the user. Share it with the Datacoves team. +
  • +
+
{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:GetObjectVersion",
+        "s3:DeleteObject",
+        "s3:DeleteObjectVersion"
+      ],
+      "Resource": "arn:aws:s3:::{your_bucket_name}/*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket",
+        "s3:GetBucketLocation"
+      ],
+      "Resource": "arn:aws:s3:::{your_bucket_name}"
+    }
+  ]
+}
+
+ + +

+ Data warehouse connection templates +

+

Please define how your data warehouse architecture will look and define the connection templates for both Analytics Engineers and Services, e.g. on a Snowflake database you'll need to specify fields such as account, warehouse, database, and role.

+ + +

+ Terraform +

+

+ Some work has been done (repo: + + itx-azt/iac + + ) to automate the creation of +these cluster requirements using terraform. However, because of authorization +restrictions imposed on terraform in jnj, it still requires manual +intervention. At the moment it is probably faster overall to do everything +manually. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/jnj/2-configuration.html b/docs/docs_output/client-docs/jnj/2-configuration.html new file mode 100644 index 00000000..535d2340 --- /dev/null +++ b/docs/docs_output/client-docs/jnj/2-configuration.html @@ -0,0 +1,883 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Configuring datacoves +

+

+ Configuration for each cluster is kept in a separate repository. They are +mounted as git submodules under + + config/{cluster_domain} + + . +

+

+ You will need to create this git repository if there isn't one already for your +cluster. Grant access to datacoves staff to this repo so we can initialize the +configuration files and add the people that will do configuration or deployment +to the git secret keyring. +
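Adding someone to the keyring is typically done with git-secret; a minimal sketch, assuming the teammate's public key and email (both illustrative):

gpg --import teammate.pub.asc         # import the teammate's public GPG key
git secret tell teammate@example.com  # add them to the repo keyring
git secret reveal -f                  # decrypt with your own key
git secret hide                       # re-encrypt for everyone in the keyring
git add . && git commit -m 'Add teammate to git secret keyring' && git push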

+

+ Clone this configuration to make changes to it. Alternatively, if you will run +the datacoves deployment from the same machine you can clone the datacoves_deployment +repository which has the configuration repos as + + git submodules + + . +

+
git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git
+cd datacoves_deployment
+git submodule init
+git submodule update config/$cluster_domain  # Specify the path to the submodule to update.
+cd config/$cluster_domain # Config repo cloned as submodule in here.
+
+

+ After the initial setup, the workflow to update configuration is as follows: +

+
# From within the cluster configuration repo.
+
+# 1. Fetch the latest configuration.
+git checkout main
+git pull
+git secret reveal -f
+
+# 2. Make your changes (see below what's required).
+
+# 3. Commit and push your changes.
+git secret hide
+git diff # Review your changes, all sensitive data should be encrypted.
+git add .
+git commit -m 'Updated secrets/configuration.'
+git push
+
+ + +

+ What values are required? +

+

Initially the configuration files will contain TODO comments to mark the places where values need to be filled in. Run grep -r TODO . to see what's pending. Remove the TODO comments when you add a value. Most values are used to configure the external services that were created during cluster setup.

+

+ The configuration variable names should give you an indication of what's needed. +If in doubt, ask. +

+

+ The requirements for each datacoves service follow. The list may be a useful +guide but it could be out of date. Please rely on the + + TODO + + marks, not on the +list, as authoritative information. +

+ + +

+ Datacoves core +

+
    +
  • + Datacoves api DB host ( + + DB_HOST + + ) and password ( + + DB_PASS + + ) in + + secrets/core-api.env + +
  • +
  • + PING_CLIENT_ID and PING_CLIENT_SECRET in + + secrets/core-api.env + +
  • +
  • + Ping group names in + + cluster-params.yaml + + , under + + project + + . +
  • +
  • + Postgres DB Provisioner for services such as airbyte/airfow/superset in + + cluster-params.secret.yaml + + under + + postgres_db_provisioner + + . +
  • +
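For orientation, the core-api entries listed above might look like the sketch below once filled in (all values are illustrative only):

# secrets/core-api.env (illustrative values)
DB_HOST=datacoves.xxxxxxxxxxxx.us-east-1.rds.amazonaws.com
DB_PASS=<password created during cluster setup>
PING_CLIENT_ID=<client id from the IRIS request>
PING_CLIENT_SECRET=<client secret from the IRIS request>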
+ + +

+ DBT Docs +

+
    +
  • + Deploy credentials in + + cluster-params.secret.yaml + + under + + deploy_credentials + + . +
  • +
+ + +

+ Airbyte +

+

+ Not yet documented. +

+ + +

+ Airflow +

+

+ The EFS CSI driver installed by cloudx is usually outdated (v1.0.0) so we need to opt out from the cloudx managed service. +

+

+ To do so, submit a PR to have Cloudx stop managing the currently installed driver here: https://sourcecode.jnj.com/projects/ITX-AED/repos/cloudx_container_pipelines_configs/browse/argocd/config.yaml#19 +

+
    +
  • + Airflow EFS volume_handle (fs id) in: + + environments/dev123/airflow.secret.yaml + +
  • +
+ + +

+ Superset +

+

+ Not yet documented. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/jnj/3-configure-bastion-ec2-instance.html b/docs/docs_output/client-docs/jnj/3-configure-bastion-ec2-instance.html new file mode 100644 index 00000000..c85b97bc --- /dev/null +++ b/docs/docs_output/client-docs/jnj/3-configure-bastion-ec2-instance.html @@ -0,0 +1,903 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Configure Bastion EC2 instance +

+ + +

+ JNJ +

+

+ Name: +Host: AWSAZTIRLL000Q.jnj.com +

+ + +

+ SSH to instance +

+
    +
  1. + In your AWS workspace/Microsoft Remote Desktop (windows) open a terminal + + ssh 10.157.82.138 -m hmac-sha2-512 + + or +
  2. +
  3. + Create a shortcut to ssh pointing to + + C:\Windows\System32\OpenSSH\ssh.exe 10.157.82.138 -m hmac-sha2-512 + +
  4. +
  5. + Click on the shortcut and type your password to access the instance +
  6. +
+ + +

+ CHAP +

+

+ Name: itx-wcr-EKS workstation +Host: awswcrnval001n.kenvue.com +

+ + +

+ Request role +

+

+ In your + + Remote Desktop + + go to + + IAM + + : +

+
    +
  1. + Request / Star a new request +
  2. +
  3. + Request the following roles: +
      +
    • + ITS-ITX-WCR-Datacove-Prd-K8sOperator +
    • +
    • + ITS-ITX-WCR-Datacove-Prd-K8sMonitor +
    • +
    • + ITS-ITX-WCR-Datacove-Prd-K8sAdmin +
    • +
    • + ITS-EP-AWSWCRNVAL001N-LINUX-NA-UNIXSEAdmins +
    • +
    +
  4. +
  5. + Details: +
      +
    • + Job role: Datacoves Support +
    • +
    • + Application ID: APP000300001207 +
    • +
    • + Application Name: DATACOVES-ANALYTICS PRODUCTION WORKBENCH FOR ELT & ORCHESTRATION +
    • +
    • + Describe, in detail, the job functions you perform that REQUIRE this level of privilege: We maintain and support the Datacoves application which runs on Kubernetes. +
    • +
    • + Is the Application Software (includes Web Components, Vendor Application), installed on the Server on which you are requesting Admin Access? No / Yes: No +
    • +
    • + Frequency of Need: Weekly +
    • +
    +
  6. +
  7. + Submit +
  8. +
+ + +

+ SSH to instance +

+
  1. On the terminal run command ssh 10.79.29.123
  2. Your user should be added to the following groups in /etc/group
+ + +

+ Create your working directory +

+

+ Create your working directory under + + /app/users + + , i.e. + + /app/users/ssassi + + . +

+ + +

+ Grant you access to docker +

+
sudo su -
+vi /etc/group
+
+

+ Example: +

+
datacoves:x:8653:amorer01,<my-user>  # To chap
+docker:x:187:amorer01,<my-user>
+
+ + +

+ Configure your home folder (~) +

+
    +
  1. + Copy the contents of + + /app/users/datacoves-home-template + + to your home folder: +
  2. +
+
cp -R /app/users/datacoves-home-template/. ~/
+
+
    +
  1. Exit and reconnect to the instance to ensure that the .bashrc script was run accordingly
  2. +
  3. + Fix kubelogin permissions +
  4. +
+
asdf uninstall kubelogin
+asdf install kubelogin
+
+
    +
  1. + Configure your credentials to the clusters +
  2. +
+
kc config get-contexts
+kc config use-context <choose one>
+kc get ns
+
+

+ Note: you'll need to change your ~/.kube/config permissions: +

+
chmod 600 ~/.kube/config
+
+ + +

+ Clone datacoves deployment repo +

+
/app/users/<your username>
+git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git
+
+

After cloning, follow the instructions to reveal secrets and install requirements.

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/jnj/4-configure-bitbucket-and-jenkins.html b/docs/docs_output/client-docs/jnj/4-configure-bitbucket-and-jenkins.html new file mode 100644 index 00000000..202921a8 --- /dev/null +++ b/docs/docs_output/client-docs/jnj/4-configure-bitbucket-and-jenkins.html @@ -0,0 +1,870 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to configure Bitbucket project and connect it with Jenkins project +

+ + +

+ Bitbucket +

+ + +

+ Ensure you enable the following hooks in your Bitbucket project +

+
    +
  • + JnJ VPCx - Post Receive Repository Hook for SCM +
  • +
  • + Webhook to Jenkins for Bitbucket Server +
  • +
+

+ Bitbucket hooks +

+ + +

+ JnJ VPCx - Post Receive Repository Hook for SCM +

+

+ Post Receive Repository Hook for SCM +

+ + +

+ Webhook to Jenkins for Bitbucket Server +

+ + +

+ Tab 1 +

+

+ Webhook tab 1 +

+ + +

+ Tab 2 +

+

+ Webhook tab 2 +

+ + +

+ Tab 3 +

+

+ Webhook tab 3 +

+ + +

+ Enable the following Merge Checks +

+

+ Merge Checks +

+ + +

+ Request access to taqy-docker for the project service account +

+

+ Typically the service account created automatically is + + sa-itsus-<PROJECT CODE>-devusr + + . +

+

+ Go to App Dev Tools and request access for that user, like so: +

+

+ App Dev Tools +

+ + +

+ Jenkins +

+ + +

+ Ensure Bitbucket plugins were correctly configured +

+

+ Navigate to Manage Jenkins -> Configure System and modify the following plugins: +

+

+ Bitbucket Server +

+

+ Bitbucket Notifier +

+ + +

+ Create Multibranch pipeline project +

+

+ At Home page -> "+ New Item": +

+

+ Multibranch pipeline project +

+ + +

+ Configure branch sources +

+

+ Branch sources +

+ + +

+ Configure repo behaviors +

+

+ Branch repo behaviors +

+ + +

+ Set up build configuration and other items +

+

+ Build configuration +

+

+ Other items +

+ + +

+ Jenkinsfile dependencies +

+

+ You'll need a credential that stores the secrets used to connect to your Data Warehouse. +

+

+ Create a new credential in the Jenkins Admin area. As of Aug. '23 those can be found in: +

+

+ + Dashboard -> Credentials -> System -> Global Credentials (unrestricted) + +

+

+ New credential +

+

+ New credential +

+ + +

+ Known issues +

+
    +
  • + When "pre hook declined" it could be due to JIRA issues configuration: from settings -> + + Jira Issues + + select "Use custom settings" and be sure "Don't need a Jira issue key" is selected +
  • +
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/jnj/5-deployment.html b/docs/docs_output/client-docs/jnj/5-deployment.html new file mode 100644 index 00000000..0bdafbcb --- /dev/null +++ b/docs/docs_output/client-docs/jnj/5-deployment.html @@ -0,0 +1,734 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to deploy (or update) datacoves to a kubernetes cluster +

+

+ Prerequisites: + + cluster and external resources setup + + . +

+

+ SSH into a machine with kubectl access to the cluster from where you will run +the installation scripts. Then: +

+
# Set these as needed for your cluster.
+cluster_domain=FILL_IN   # e.g. ensembletest.apps.jnj.com
+kubectl_context=FILL_IN  # e.g. itx-ank-ensemble-test
+
+# Clone the repository into the installation workstation (required once).
+git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git
+cd datacoves_deployment
+
+# Install python dependencies for the installation scripts (required once).
+pip3 install --user -r requirements.txt
+
+# Fetch the latest changes and reveal the secrets in the config submodule directory.
+git pull
+git submodule update --init
+(cd config/$cluster_domain; git secret reveal -f)
+
+# Install datacoves base dependencies into the cluster (ingress-nginx, etc.)
+# Usually not required after the first time datacoves is released to a cluster.
+./cli.py setup_base $kubectl_context $cluster_domain
+
+# Deploying ingress-nginx will create an ELB. Use the following command to retrieve its URL.
+kubectl --context $kubectl_context get -A svc | grep LoadBalancer
+
+# Update cluster-params.yaml setting external_dns_url to that URL.
+$EDITOR config/$cluster_domain/cluster-params.yaml
+# Commit the change.
+
+# Install/update datacoves.
+./cli.py install
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/jnj/6-access.html b/docs/docs_output/client-docs/jnj/6-access.html new file mode 100644 index 00000000..cebacefb --- /dev/null +++ b/docs/docs_output/client-docs/jnj/6-access.html @@ -0,0 +1,910 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Access Datacoves on JNJ clusters +

+ + +

+ Requesting Roles +

+
+

+ + NOTE: + + Please inform Martin Ryan before requesting appdevtools roles +

+
+

+ In order to have access to all third-party tools (Bitbucket, Jenkins, Artifactory, etc.) you must request specific roles. +

+

+ To do so, you must go to https://appdevtools.jnj.com and request the + + Datacoves Support + + Model User template. +

+

+ Make sure to write + + Needed for Datacoves platform support + + as requesting reason. +

+

+ AppDevTools User Template +

+ + +

+ Datacoves Access +

+

+ In order to get access as an admin and developer on the different JNJ clusters you need to request the following AD groups: +

+ + +

+ Medical Devices +

+
    +
  • + JNJ-APP-HMD-DATACOVES-ADMIN +
  • +
  • + JNJ-APP-HMD-DATACOVES-DEVELOPER +
  • +
+ + +

+ Consumer Health / Kenvue +

+

+ Go to iam.kenvue.com, search for + + Datacoves + + . There's one role for Admin and one per Environment. You need to request + + ADMIN + + , + + *-DEVELOPER + + and + + *-SYSADMIN + + roles. For example: +

+
    +
  • + ITS_APP_DATACOVES_ADMIN +
  • +
  • + ITS_APP_DATACOVES_DEV_CBI_VMA_DEVELOPER +
  • +
  • + ITS_APP_DATACOVES_DEV_CBI_VMA_SYSADMIN +
  • +
  • + ITS_APP_DATACOVES_DEV_COMX_CUST_DEVELOPER +
  • +
  • + ITS_APP_DATACOVES_DEV_COMX_CUST_SYSADMIN +
  • +
  • + ITS_APP_DATACOVES_DEV_COMX_GLOBAL_DEVELOPER +
  • +
+ + +

+ Ensemble +

+
    +
  • + JNJ-APP-PCE-DATACOVES-ADMIN +
  • +
  • + JNJ-APP-PCE-DATACOVES-DEVELOPER +
  • +
+ + +

+ R&D +

+
    +
  • + ITS-APP-DEV-JRDDATACOVES-ADMIN +
  • +
  • + ITS-APP-DEV-JRDDATACOVES-DEVELOPER +
  • +
+ + +

+ taqy Access +

+

+ taqy is the docker repository used by all J&J instances. Access to it is necessary in order to manage images on it. +

+

+ To request access, use https://appdevtools.jnj.com +

+
    +
  • + Request Access, By User, Other +
  • +
  • + Enter your username +
  • +
  • + Tool: EAT Jenkins and Artifactory +
  • +
  • + Team: taqy +
  • +
  • + Reason for request: Access to CI images for DataCoves +
  • +
  • + Grant these roles: ITS-ASx-TAQY-DEV-Executors, ITS-ASx-TAQY-DEV-Viewers +
  • +
+

+ For reference, the main Ensemble Jenkins user is sa-itsus-jbfl-devusr +

+ + +

+ Snowflake Access +

+

+ As done with the groups above, you must also request + + JNJ-APP-PCE-SNOWFLAKE-EMEA-DEVELOPER + +

+ + +

+ How to request the groups? +

+

+ Using the AWS workspace: +

+
    +
  1. + Navigate to https://iam.jnj.com +
  2. +
  3. + Click on + + Start new request + +
  4. +
  5. + Type the group name on the + + Find a service item + + search box. +
  6. +
  7. + Click on + + Request + + button +
  8. +
  9. + In the popup, leave + + Valid from + + and + + Valid until + + empty, in the + + reason + + field type "Datacoves support team". +
  10. +
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/jnj/7-configure-sa-docker-in-kubernates.html b/docs/docs_output/client-docs/jnj/7-configure-sa-docker-in-kubernates.html new file mode 100644 index 00000000..b0bf6209 --- /dev/null +++ b/docs/docs_output/client-docs/jnj/7-configure-sa-docker-in-kubernates.html @@ -0,0 +1,764 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

How to configure Service Account Docker in Kubernetes for pulling images.

+

+ + JnJ + + and + + Kenvue + + are using their own private Docker artifact repositories. In order to download images from those repositories in Kubernetes we need to create secrets with valid credentials in each Kubernetes cluster. +

+

This process is documented by JnJ on Confluence.

+ + +

Select Kubernetes context

+
kubectl config get-contexts
+kubectl config use-context <context>
+
+ + +

+ Delete old service account (If it already exists) +

+
kubectl get secrets -n default
+kubectl delete secret taqy-docker -n default
+
+ + +

+ Create new service account +

+
# Create secret in default namespace - Recommended to use the EAT service account username and password for credentials
+kubectl create secret docker-registry taqy-docker --docker-server=jnj.artifactrepo.jnj.com --docker-username=<service-account-username> --docker-password=<service-account-password> -n default
+
+# Annotate secret to sync across all namespaces
+kubectl annotate secret taqy-docker cluster.managed.secret="true" -n default
+
+ + +

+ Inspect the new secret +

+
kubectl -n default get secret taqy-docker -o yaml
+
+

+ Copy the value from + + data.dockerconfigjson + +

+
echo <value> | base64 -d
+
+

+ Note: Check that the secrets have been replicated to all namespaces. (Can check one or two) +

+
kubectl -n <namespace> get secret taqy-docker -o yaml
+echo <value> | base64 -d
+
+

+ If the secret was not replicated, check the pod's logs: +

+
kubectl -n kube-system get pods
+kubectl -n kube-system logs namespace-secrets-sync-<hash> --tail 100
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/jnj/8-summary-requirements-new-cluster.html b/docs/docs_output/client-docs/jnj/8-summary-requirements-new-cluster.html new file mode 100644 index 00000000..fc7e8d1a --- /dev/null +++ b/docs/docs_output/client-docs/jnj/8-summary-requirements-new-cluster.html @@ -0,0 +1,928 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Summary for the requirements of a new Cluster. +

+

+ For more details check + + Cluster requirements + +

+ + +

+ Database (RDS) +

+ + +

+ Minimum requirements +

+
    +
  • + Engine: Postgres +
  • +
  • + Version: 14.9 +
  • +
  • + Multi-AZ DB Cluster. +
  • +
  • + Master user: postgres +
  • +
  • + Master password: + + +
  • +
  • + Instance class: db.r5.large +
  • +
  • + Storage type: Aurora Standard or gp2 +
  • +
  • + Allocated_storage: 100GB +
  • +
  • + Enable storage autoscaling +
  • +
  • + Maximum storage threshold: 1TB +
  • +
  • + Authentication: password +
  • +
+ + +

+ EKS +

+ + +

+ Configuration +

+
    +
  • + External DNS. +
  • +
  • + + m5.xlarge + + instances. +
  • +
+ + +

+ Worker groups +

+
    +
  • + General +
  • +
  • + Volumed +
  • +
  • + Workers +
  • +
+ + +

+ General +

+
    +
  • + min_nodes: 1 +
  • +
  • + max_nodes: 30 +
  • +
  • + root_volume_size: 200 +
  • +
  • + labels: +
  • +
+
labels:
+    ...
+    - key: k8s.datacoves.com/nodegroup-kind
+      value: general
+
+ + +

+ Volumed +

+
    +
  • + min_nodes: 1 +
  • +
  • + max_nodes: 30 +
  • +
  • + root_volume_size: 200 +
  • +
  • + labels: +
  • +
+
labels:
+    ...
+    - key: k8s.datacoves.com/nodegroup-kind
+      value: volumed
+
+ + +

+ Workers +

+
    +
  • + min_nodes: 1 +
  • +
  • + max_nodes: 30 +
  • +
  • + root_volume_size: 200 +
  • +
  • + labels: +
  • +
+
labels:
+    ...
+    - key: k8s.datacoves.com/workers
+      value: enabled
+
+ + +

+ Other configuration. +

+
    +
  • + EFS for each environment for + + Airflow Logs + + . +
  • +
  • + S3 buckets for each environment for + + Dags sync + + , with read-only permissions. (Optional. Can be git-sync). +
  • +
  • One S3 bucket for the Observability stack. Example: ensemble-prd-observability-grafana-loki. (Full permissions)
  • +
  • + One S3 bucket for + + dbt-api + + . Example + + ensemble-prd-dbt-api + + . (Full permissions) +
  • +
+ + +

+ Example for full S3 bucket permission +

+
{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:GetObjectVersion",
+        "s3:DeleteObject",
+        "s3:DeleteObjectVersion"
+      ],
+      "Resource": "arn:aws:s3:::{your_bucket_name}/*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket",
+        "s3:GetBucketLocation"
+      ],
+      "Resource": "arn:aws:s3:::{your_bucket_name}"
+    }
+  ]
+}
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/jnj/index.html b/docs/docs_output/client-docs/jnj/index.html new file mode 100644 index 00000000..f24f6ab4 --- /dev/null +++ b/docs/docs_output/client-docs/jnj/index.html @@ -0,0 +1,729 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Datacoves deployment +

+

+ This repository contains the datacoves installation scripts. They install +datacoves to an existing EKS cluster, based on the configuration files in the + + config + + directory. Configuration for each cluster is kept in a separate +repository. They are mounted as git submodules under + + config/{cluster_domain} + + . +

+

+ Before running the installation scripts the EKS cluster and other required AWS +resources must be created. See + + cluster requirements + + . +

+

+ Then a repository to use as the cluster configuration submodule must be created. +See + + configuration + + . +

+

+ After that, deployment can begin. See + + deployment + + . +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/kenvue/how-to-setup-helm-chart.html b/docs/docs_output/client-docs/kenvue/how-to-setup-helm-chart.html new file mode 100644 index 00000000..bdddc7a8 --- /dev/null +++ b/docs/docs_output/client-docs/kenvue/how-to-setup-helm-chart.html @@ -0,0 +1,757 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to set up Helm Chart on kenvue +

+

Artifactory: https://kenvue.jfrog.io
Repository: dco-helm
Credentials: See 1Password
Protocol: OCI

+

+ Steps: +

+
    +
  1. + Artifactory login. +
  2. +
  3. + Download or build the helm chart. +
  4. +
  5. + Upload the new helm chart. +
  6. +
  7. + Check the new helm chart. +
  8. +
  9. + Install the helm chart. +
  10. +
+ + +

+ 1. Artifactory login +

+
helm registry login https://kenvue.jfrog.io/dco-helm
+
+ + +

+ 2. Build or download the helm chart. +

+

+ In this case as an example we are going to download a helm chart from the JnJ artifactory +

+
wget --user <my-user> --password <my-password> https://artifactrepo.jnj.com:443/artifactory/jnj-helm-charts/metrics-server-3.12.2.tgz
+
+ + +

+ 3. Upload the new helm chart. +

+
 helm push metrics-server-3.12.2.tgz oci://kenvue.jfrog.io/dco-helm/metrics-server
+
+ + +

+ 4. Check the new helm chart. +

+
helm show all oci://kenvue.jfrog.io/dco-helm/metrics-server
+
+ + +

+ 5. Install the helm chart. +

+
helm install my-release oci://kenvue.jfrog.io/dco-helm/metrics-server --version 3.12.2
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/client-docs/orrum/index.html b/docs/docs_output/client-docs/orrum/index.html new file mode 100644 index 00000000..08d38de2 --- /dev/null +++ b/docs/docs_output/client-docs/orrum/index.html @@ -0,0 +1,798 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Datacoves deployment +

+

This section contains instructions on how to connect to Orrum infrastructure via Azure to build, maintain, and monitor datacoves deployments.

+ + +

+ VPN Connection +

+

+ kubectl requires connection to Orrum VPN. Download + + Azure VPN Client + + . +

+

+ The profile can be downloaded from Azure; login with Support_Datacoves@orrumcorp.onmicrosoft.com with the credentials from 1Password. +

+

+ https://portal.azure.com/#@orrum.com/resource/subscriptions/0f8e4c48-c319-4ed9-af14-ef50501e3a41/resourceGroups/DataCoves/providers/Microsoft.Network/virtualNetworkGateways/DataCovesGateway/pointtositeconfiguration +

+

+ Click "Download VPN client" in the header, and you will get a zip file with the profile files; you will want the Azure client profiles, and you can use the Import button in the Azure client to import it. +

+

+ To connect to the vpn, use Support_Datacoves@orrumcorp.onmicrosoft.com, credentials on 1Password. +

+ + +

+ kubectl setup +

+
# Ensure Python is Installed
+pipx install az-cli --include-deps
+
+# Get login password from 1pswd
+az login -u Support_Datacoves@orrumcorp.onmicrosoft.com
+
+# Install kubectl + kubelogin
+az aks install-cli
+
+# Set subscription
+az account set --subscription 0f8e4c48-c319-4ed9-af14-ef50501e3a41
+
+# Get credentials for new cluster
+az aks get-credentials --resource-group DataCoves --name Datacoves_kube
+
+# List contexts
+kubectl config use-context Datacoves_kube
+
+ + +

+ Rename Context +

+

+ It is very important that the context be named orrum-new as things such as updating the cluster will have scripts that depend on the context name. +

+
kubectl config rename-context Datacoves_kube orrum-new
+kubectl config use-context orrum-new
+
+

+ Now verify connectivity with + + kubectl get ns + +

+ + +

+ Config DNS on + + /etc/hosts + + (Optional) +

+

+ Note: This is probably not necessary anymore. +

+

+ You can force the domain and subdomains DNS if it's not configured. +

+
10.10.0.36       datacoves.orrum.com
+10.10.0.36       api.datacoves.orrum.com
+10.10.0.36       authenticate-dev123.datacoves.orrum.com
+10.10.0.36       dev123.datacoves.orrum.com
+10.10.0.36       airbyte-dev123.datacoves.orrum.com
+10.10.0.36       dbt-docs-dev123.datacoves.orrum.com
+10.10.0.36       airflow-dev123.datacoves.orrum.com
+10.10.0.36       superset-dev123.datacoves.orrum.com
+10.10.0.36       grafana.datacoves.orrum.com
+
+# <user>
+10.10.0.36       <user>-1-transform-dev123.datacoves.orrum.com
+10.10.0.36       <user>-1-dbt-docs-dev123.datacoves.orrum.com
+10.10.0.36       <user>-transform-dev123.datacoves.orrum.com
+
+

+ + Note: Check the cluster's Public IP + + 10.10.0.36 + + +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/dev-logs/2021-06-create-er-diagram.html b/docs/docs_output/dev-logs/2021-06-create-er-diagram.html new file mode 100644 index 00000000..ddd759f0 --- /dev/null +++ b/docs/docs_output/dev-logs/2021-06-create-er-diagram.html @@ -0,0 +1,704 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to create an entity relationship diagram +

+
./cli.py pod-sh
+apt install graphviz-dev
+pip3 install pygraphviz
+./manage.py graph_models -a -X *Mixin,Abstract*,ContentType,Session,Nonce,Partial,TokenProxy -g -o core-erd.png
+
+

+ Learn more at https://django-extensions.readthedocs.io/en/latest/graph_models.html +

+

+ ER diagram +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/dev-logs/2021-09-eks-setup.html b/docs/docs_output/dev-logs/2021-09-eks-setup.html new file mode 100644 index 00000000..2ecbc78e --- /dev/null +++ b/docs/docs_output/dev-logs/2021-09-eks-setup.html @@ -0,0 +1,838 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Installation +

+ + +

+ Set up IAM user +

+

+ IAM user needs the following privileges to create the cluster: +

+

+ https://eksctl.io/usage/minimum-iam-policies/ +

+ + +

+ AWS CLI +

+

+ Install AWS CLI in your local environment +

+

+ https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html +

+ + +

+ Configure credentials +

+
  1. Generate access key
  2. Configure your credentials (see the sketch below)
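A minimal sketch of configuring the credentials interactively (region and output format are examples):

aws configure
# AWS Access Key ID [None]: <access key id>
# AWS Secret Access Key [None]: <secret access key>
# Default region name [None]: us-east-1
# Default output format [None]: json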
+ + +

+ Install eksctl +

+

+ Install eksctl +

+

+ https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html +

+ + +

+ On Mac +

+
brew tap weaveworks/tap
+brew install weaveworks/tap/eksctl
+
+ + +

+ Create cluster +

+

+ https://eksctl.io/usage/creating-and-managing-clusters/ +

+
eksctl create cluster -f cluster.yaml --tags service=datacoves
+
+ + +

+ Install metrics server +

+

+ https://docs.aws.amazon.com/eks/latest/userguide/metrics-server.html +

+
kubectl apply -f https://github.com/kubernetes-sigs/metrics-server/releases/latest/download/components.yaml
+
+ + +

+ Kubernetes dashboard +

+

+ https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html +

+
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.5/aio/deploy/recommended.yaml
+kubectl apply -f eks-admin-service-account.yaml
+
+ + +

+ Open dashboard +

+
kubectl proxy
+
+

+ http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#!/login +

+

+ Get a login token with: +

+
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}')
+
+ + +

+ Configure Docker hub +

+
kubectl create ns alpha2
+kubectl create secret docker-registry docker-secret \
+--docker-server="https://index.docker.io/v1/" \
+--docker-username="<USER_NAME>" \
+--docker-password="<PASSWORD>" \
+--docker-email="<EMAIL>" \
+--namespace="alpha2"
+
+ + +

+ EKS (k8s on AWS) +

+
# Create the cluster  https://eksctl.io/usage/creating-and-managing-clusters/
+eksctl create cluster -f eks/eks-cluster.yaml
+
+# (Optional) Inspect the config that kustomize generates
+kubectl kustomize eks
+
+# Apply the kustomization directory to the cluster
+kubectl apply -k eks
+
+ + +

+ Kubernetes dashboard +

+

+ To open the dashboard run + + kubectl proxy + + and navigate to: +

+

+ http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#!/login +

+
# Get a login token with
+kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep eks-admin | awk '{print $1}')
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/dev-logs/2021-12-jnj-ensembledev-deployment.html b/docs/docs_output/dev-logs/2021-12-jnj-ensembledev-deployment.html new file mode 100644 index 00000000..07f0ebcc --- /dev/null +++ b/docs/docs_output/dev-logs/2021-12-jnj-ensembledev-deployment.html @@ -0,0 +1,888 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Datacoves deployment +

+

+ This document describes the deployment of the components of a datacoves system +to a JnJ EKS kubernetes cluster. +

+ + +

+ Prerequisites +

+

+ + This confluence page + + should be followed prior to the steps outlined here to deploy datacoves. It +should document how to setup an EKS cluster with the necessary prerequisites, +and how to create and configure the required AWS services used. +

+

+ We assume here that there is a EKS cluster running with certain services already +deployed on it. The cluster is setup through CI from the git repo at +https://sourcecode.jnj.com/projects/ITX-AZT/repos/ensemble. +We require the following systems running in the cluster: +

+
    +
  • + ingress-nginx as an ingress controller. +
  • +
  • + cert-manager to issue SSL certificates. +
  • +
  • + external-dns to create DNS rules from annotations. +
  • +
  • + A system that creates a new kubernetes secret with a known name with + credentials to pull docker images in every namespace of the cluster. +
  • +
+

+ The machine from where the deployments scripts will be run must have python3 and +git installed, as well as kubectl (client) version 1.21 or higher, configured +to access the cluster with broad permissions. +
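A quick way to check these prerequisites from the deployment machine (output will vary):

python3 --version
git --version
kubectl version --client   # should report v1.21 or higher
kubectl config current-context && kubectl get ns   # confirms cluster access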

+

+ We also assume the docker registry / repository that you configure to pull +images has all the docker images required. Datacoves will build and push this +images. The list of images used by a cluster, computed from this repo's +configuration, can be displayed with + + ./cli.py images ensembledev.apps.jnj.com + + , +or in general + + ./cli.py images CLUSTER_DOMAIN + + . +

+ + +

+ Initial setup and configuration +

+

+ Clone the datacoves_deployment git repository and change directory to it. +

+
git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git
+cd datacoves_deployment
+
+

+ Configuration is stored in the repo, encrypted using git-secret. You will need +to be in the repo's git secret keyring to decrypt them. Ask someone already in +the keyring for access (e.g. spelufo@its.jnj.com). +

+

+ Decrypt the configuration secrets. The + + -f + + flag will overwrite existing files. +

+
git secret reveal -f
+
+

+ The + + config + + directory holds configuration files. Each subdirectory holds +configuration for a kubernetes cluster and must be named after the cluster +domain name. For example, the configuration for the current (2021) version of +datacoves is in + + config/ensembledev.apps.jnj.com + + . +

+

If deploying to a new cluster, create a new directory under config based on config/ensembledev.apps.jnj.com. You will need to use git secret add and git secret hide to add your new secrets to the repo and encrypt them before committing them.

+ + +

+ Deploying datacoves core web application +

+

First, make sure your kubectl context is appropriate for the cluster.

+
CLUSTER_DOMAIN=ensembledev.apps.jnj.com
+KCTX=$(kubectl config current-context)
+
+# Deploy the datacoves core api server to the core namespace.
+./cli.py setup_core "$KCTX" "$CLUSTER_DOMAIN"
+
+

+ Enter an api server pod and run database migrations: +

+
kubectl -n core exec -it $(kubectl -n core get pods -l app=core-api -o name) -- bash
+
+# From inside the pod:
+./manage.py migrate
+./manage.py loaddata */fixtures/*
+
+

+ Check the server is running: +

+
$ kubectl -n core get pods
+NAME                                   READY   STATUS    RESTARTS   AGE
+core-api-deployment-5f8f64cf69-6rvhd   1/1     Running   0          3d19h
+
+ + +

+ Deploying datacoves project operator +

+

+ The datacoves project operator manages two + + CRDs + + : +datacoves.com/Project and datacoves.com/User. To deploy the operator, run: +

+
./cli.py setup_operator "$KCTX" "$CLUSTER_DOMAIN"
+
+

+ To check the operator is running, and/or see its logs: +

+
$ kubectl -n operator-system get pods
+NAME                                          READY   STATUS    RESTARTS   AGE
+operator-controller-manager-78cc7cfb6-9ddkw   2/2     Running   0          47h
+
+$ kubectl -n operator-system logs -l control-plane=controller-manager -c manager -f
+
+ + +

+ Deploying a datacoves project namespace +

+

+ Every project is deployed to a namespace named + + dcp-{project_name} + + . The +setup_project script creates a new namespace and project kubernetes object from +the configuration file in + + config/{cluster_domain}/projects/{project_name}.yaml + + . +The operator will detect changes to this object and create deployments and other +resources for the project. +

+
PROJECT_NAME=emeadev
+./cli.py setup_project "$KCTX" "$CLUSTER_DOMAIN" "$PROJECT_NAME"
+
+

To watch for pod status changes as the operator creates the project resources:

+
kubectl -n "dcp-$PROJECT_NAME" get pods --watch
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/dev-logs/2022-04-jnj-artemisdev-configuration.html b/docs/docs_output/dev-logs/2022-04-jnj-artemisdev-configuration.html new file mode 100644 index 00000000..f4de7973 --- /dev/null +++ b/docs/docs_output/dev-logs/2022-04-jnj-artemisdev-configuration.html @@ -0,0 +1,753 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Configuring datacoves +

+

Requirements: Access to a datacoves configuration git repo and being in its git secret keyring.

+

+ First pull the latest changes and reveal the git secrets. +

+
git checkout main
+git pull
+git secret reveal -f
+
+

+ I've marked with + + TODO + + the values that need to be filled in: +

+
    +
  • + Airflow DB connection in: + + environments/dev123/airflow.secret.yaml + +
  • +
  • + Airflow EFS volume_handle (fs id) in: + + environments/dev123/airflow.secret.yaml + +
  • +
  • + Datacoves api DB host ( + + DB_HOST + + ) and password ( + + DB_PASS + + ) in + + secrets/core-api.env + +
  • +
  • + PING_CLIENT_ID and PING_CLIENT_SECRET in + + secrets/core-api.env + +
  • +
+

+ After editing those files to add the required values commit the changes with: +

+
git secret hide
+git diff # Review your changes, all sensitive data should be encrypted.
+git add .
+git commit -m 'Updated secrets.'
+git push
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/dev-logs/2022-04-jnj-ensembletest-deployment.html b/docs/docs_output/dev-logs/2022-04-jnj-ensembletest-deployment.html new file mode 100644 index 00000000..5adcbe8f --- /dev/null +++ b/docs/docs_output/dev-logs/2022-04-jnj-ensembletest-deployment.html @@ -0,0 +1,743 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Datacoves deployment +

+

+ This repository contains the datacoves installation scripts. They install datacoves to an existing EKS cluster, based on the configuration files in the + + config + + directory. Configuration for each cluster is kept in a separate repository. They are mounted as git submodules under + + config/{cluster_domain} + + . +

+

+ Prior to this, the EKS cluster and other required AWS resources must be created. The clusters are created through CloudX pipelines, from + + cluster.yaml + + files in other repositories like + + itx-ank/ensemble + + . Additional AWS resources are created using terraform from the + + iac + + repository. +

+

+ Once these prerequisites are done, and the configuration repository for the cluster has been updated accordingly, the installation is as follows. +

+
# Set these as needed for your cluster.
+cluster_domain=ensembletest.apps.jnj.com
+kubectl_context=itx-ank-ensemble-test
+
+# Clone this repository into the installation workstation.
+git clone https://sourcecode.jnj.com/scm/asx-ahrx/datacoves_deployment.git
+cd datacoves_deployment
+git submodule update --init
+
+# Reveal the secrets in the config submodule directory.
+(cd config/$cluster_domain; git secret reveal -f)
+
+# Install python dependencies for the installation scripts.
+pip3 install --user -r requirements.txt
+
+# Install datacoves base dependencies into the cluster (ingress-nginx, etc.)
+./cli.py setup_base $kubectl_context $cluster_domain
+
+# Install datacoves.
+./cli.py install $kubectl_context $cluster_domain
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/dev-logs/2022-05-setup-aks-postgres-flexible-server.html b/docs/docs_output/dev-logs/2022-05-setup-aks-postgres-flexible-server.html new file mode 100644 index 00000000..4190a5c1 --- /dev/null +++ b/docs/docs_output/dev-logs/2022-05-setup-aks-postgres-flexible-server.html @@ -0,0 +1,788 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Set up postgres flexible server on Azure +

+
    +
  1. + Find it + + here + +
  2. +
  3. + Connect to it using this command: +
  4. +
+
psql -h datacoves-east-us.postgres.database.azure.com -U dcmaster -d postgres
+
+
    +
  1. + Create the + + datacoves + + user that will be used by Django: +
  2. +
+
CREATE USER datacoves password '<PASSWORD>';
+ALTER USER datacoves CREATEDB CREATEROLE;
+GRANT datacoves TO dcmaster;
+CREATE DATABASE datacoves OWNER datacoves;
+GRANT CONNECT ON DATABASE datacoves TO datacoves;
+
+
    +
  1. + Dump data from internal Database +
  2. +
+
pg_dump -U postgres -h postgres-svc -d datacoves -Fc > dump.sql
+
+
    +
  1. + Restore data on new Azure DB +
  2. +
+
pg_restore -U datacoves -h datacoves-east-us.postgres.database.azure.com -d datacoves --no-owner --role=datacoves dump.sql
+
+
    +
  1. Repeat steps 4 and 5 with the rest of the services that need to be migrated
  2. +
+

Keep in mind that database object ownership could change; reassign the owner to the corresponding service account, e.g.:

+
REASSIGN OWNED BY datacoves TO dev123_airbyte;
+
+

+ If migrating + + temporal + + and + + temporal_visibility + + databases, you also need to update the database name on + + schema_versions + + . +

+
    +
  1. +

    + Set + + airbyte_db_external: true + + , + + airflow_db_external: true + + and + + superset_db_external: true + + accordingly +

    +
  2. +
  3. +

    + Configure + + postgres_db_provisioner + + using the master user connection/credentials +

    +
  4. +
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/dev-logs/index.html b/docs/docs_output/dev-logs/index.html new file mode 100644 index 00000000..31d10174 --- /dev/null +++ b/docs/docs_output/dev-logs/index.html @@ -0,0 +1,688 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/administrate-east-us-a-aks-cluster.html b/docs/docs_output/how-tos/administrate-east-us-a-aks-cluster.html new file mode 100644 index 00000000..892afe2b --- /dev/null +++ b/docs/docs_output/how-tos/administrate-east-us-a-aks-cluster.html @@ -0,0 +1,792 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Administrate east-us-a AKS cluster +

+ + +

+ Permissions +

+
    +
  1. + Ask an administrator to create you a datacoves (microsoft) user. https://admin.microsoft.com. +
  2. +
  3. + Ask an administrator to add you to the + + DevOps + + + group + + . +
  4. +
+ + +

+ Configure kubectl +

+

+ + Download Azure CLI + + . +

+

+ Login to your account: +

+
az login
+
+

+ Then, run the following commands: +

+
az account set --subscription 91bd2205-0d74-42c9-86ad-41cca1b4822b
+az aks get-credentials --resource-group datacoves --name east-us-a
+
+

+ This will add a new context to + + kubectl + + , so you can now run: +

+
kubectl get pods -A
+
+ + +

+ Manage nodepools +

+ + +

+ List nodepools +

+

+ List nodepools in the + + datacoves + + resource group, + + east-us-a + + cluster: +

+
az aks nodepool list --cluster-name east-us-a --resource-group datacoves
+
+ + +

+ Add workers nodepool +

+
 az aks nodepool add --cluster-name east-us-a --resource-group datacoves --name workerslarge --mode User --enable-cluster-autoscaler --min-count 1 --max-count 10 --node-vm-size Standard_D4s_v3 --labels k8s.datacoves.com/workers=enabled
+
+ + +

+ Modify existing nodepool to add new labels +

+

Let's add a new label k8s.datacoves.com/workers=enabled to an existing nodepool which already has the label k8s.datacoves.com/nodegroup-kind=general. Both the old and the new labels need to be specified.

+
az aks nodepool update --cluster-name east-us-a --resource-group datacoves --name generallarge --labels {k8s.datacoves.com/workers=enabled,k8s.datacoves.com/nodegroup-kind=general} 
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/airflow-configuration.html b/docs/docs_output/how-tos/airflow-configuration.html new file mode 100644 index 00000000..890d1c7f --- /dev/null +++ b/docs/docs_output/how-tos/airflow-configuration.html @@ -0,0 +1,719 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Environment variables override +

+

+ Airflow has a feature that lets you override system's defaults on a per-task basis (see https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/executor/kubernetes.html#pod-override). +

+

+ + Example "Log level override" + + : +

+
"pod_override": k8s.V1Pod(
+    spec=k8s.V1PodSpec(
+        containers=[
+            k8s.V1Container(
+                name="base",
+                image=f"{IMAGE_REPO}:{IMAGE_TAG}",
+                env=[
+                    k8s.V1EnvVar(
+                        name="AIRFLOW__LOGGING__LOGGING_LEVEL",
+                        value="DEBUG"
+                    )
+                ]
+            )
+        ]
+    )
+),
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/billing-system.html b/docs/docs_output/how-tos/billing-system.html new file mode 100644 index 00000000..2db4e4e7 --- /dev/null +++ b/docs/docs_output/how-tos/billing-system.html @@ -0,0 +1,1050 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Datacoves Billing System +

+

+ This document provides comprehensive information on Datacoves’ billing integration with Stripe. +

+ + +

+ Introduction +

+

+ Datacoves integrates with Stripe to manage billing by listening to Stripe events and adjusting Datacoves data accordingly. The system also modifies subscriptions when changes occur in services, users, or tally marks. +

+

+ The connection between Datacoves and Stripe begins when a user creates a subscription through the Setup Wizard, or when a Datacoves Admin sets up a subscription directly in the Stripe UI. +

+

+ + Note + + : Free trial accounts + + are not connected + + to Stripe. +

+ + +

+ Account Setup Wizard +

+

+ Account Setup Wizard +

+ + +

+ Customer Types +

+

+ For billing, we distinguish between three customer types: +

+
    +
  1. + Free trial customers +
  2. +
  3. + Credit card customers +
  4. +
  5. + Check / bank transfer customers +
  6. +
+ + +

+ Free trial customers +

+

These types of customers are not connected to Stripe while they're on trial. During the trial period, Stripe does not have information about these accounts.

+

+ Free trial customers will see a button on the header inviting them to finalize the trial and create a subscription. Upon subscribing, they transition to + + credit card customers + + . +

+ + +

+ Credit card customers +

+

+ The credit card customer workflow is completely managed by Datacoves: +

+
    +
  1. +

    + Customer selects + + Growth Plan + + and, after clicking on + + Next + + , Datacoves creates the Stripe customer, sets the + + customer_id + + and redirects them to the Stripe billing page where the Stripe billing process begins. +

    +
  2. +
  3. +

    + Once the customer enters their credit card and completes the Stripe billing process, Datacoves receives a notification and sets the + + subscription + + payload on the brand new account. +

    +
  4. +
  5. +

    + From this point, any updates to services/users/tally marks in the Datacoves account are automatically reflected in Stripe, adjusting invoices accordingly. +

    +
  6. +
+ + +

+ Check / bank customers +

+

+ For customers preferring bank transfers or checks, setup is managed manually through the Stripe UI. +

+ + +

+ Customer Setup +

+
    +
  1. + Customer creates a Trial account as described earlier. +
  2. +
  3. + A Stripe Admin + + manually creates the customer + + using the Stripe UI. To follow the same convention used by Datacoves, please use the account's + + slug + + as the Stripe customer name, and the account's + + owner email + + as the Stripe customer email. Add an address so taxes are calculated automatically. +
  4. +
  5. + Once you have a customer id on Stripe, modify the Datacoves account on the admin panel and set it in the + + Customer Id + + field. +
  6. +
  7. + Modify the Datacoves account once more and set the right + + plan + + and + + variant + + . We typically use the + + growth + + plan for these accounts; the + + variant + + will be determined by Sales depending on the pricing negotiated. +
  8. +
+ + +

+ Subscription Setup +

+

+ The Stripe Admin now + + creates a subscription on Stripe + + for the recently created customer. Please be cautious with the products included in the subscription: they should exactly match the products included in the plan. You can inspect them + + here + + . +

+

+ You don't need to add the metered products to a new subscription; Datacoves will modify the subscription and add them later. Unless the customer prepaid for developer seats and services, include the developer seats product specifying the total user licenses and then one product line per service (Airbyte, Airflow, Superset, DataHub). +In the following example, there are 5 user licenses, 1 Airbyte, 1 Airflow and 1 Superset server: +

+

+ Create Subscription +

+

+ NOTE: Certain customers (like Guitar Center) could opt to prepay the developer seats and services costs via Bank transfer / check. In those cases, you only include the metered products in the subscription. +

+

+ Don't forget to set the right plan in the subscription metadata. It's usually + + growth-monthly + + ; if you need a different one, enter the + + slug + + field of the chosen plan. +

+

+ Subscription Plan +

+

+ On Payment, select + + Email invoice to the customer to pay manually + + and uncheck + + Include a stripe-hosted link to an invoice payment page in the invoice + + . +

+

+ Finalize by clicking on + + Create subscription + + . +

+

+ Go to the Django admin panel and check that the account has a + + JSON + + dict on the field + + subscription + + . If it does, the connection is set; you can now finalize the trial by setting a past end date in the + + Trial ends at + + field (or by just removing trial start and end dates). +

+ + +

+ Add credit to customer +

+

+ Once the subscription is created, the customer will start accruing a balance. +As soon as Datacoves receives a check or wire, a Stripe Admin needs to register it on the Django Admin, as follows. +Note that credits have a validity period; during that period the developer licenses or services specified will be discounted from the invoice. +

+
    +
  1. + Go to Accounts, select the customer's account and edit it. +
  2. +
  3. + Scroll down until you see the + + Credits + + area. +
  4. +
  5. + Click on + + Add another credit + + and complete the required fields including as much information as possible in the reference field. +
  6. +
  7. + Click on + + Save + + . +
  8. +
+

+ Add credit to Account +

+ + +

+ F.A.Q. +

+ + +

+ How do I configure my local environment to test Stripe? +

+

+ First of all, you need to set the feature + + accounts_signup + + to + + True + + on the only record you have in the + + Cluster + + model. +

+

+ Then, if you're using + + datacoveslocal.com + + and you were automatically granted permissions to the + + local + + account, you need +to remove all permissions to that account; doing so lets the Datacoves UI allow you to create a new account using the +setup wizard. +

+

+ You should also set + + setup enabled + + to + + True + + on the admin panel for your user. +

+

+ Then, navigate to https://datacoveslocal.com/account-setup/ and follow the instructions to create an account using Stripe. +

+ + +

+ How do I run the stripe webhooks locally to test billing integration? +

+

+ Run + + ./cli.py stripe_webhooks + + and follow the instructions. +

+ + +

+ How to sync stripe live products with test products? +

+

+ Sometimes you modify the live products (prices/descriptions) and need to update the test ones. +

+

+ Just run + + ./cli.py copy_to_stripe_test + + to run the live -> test sync process. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/celery-monitoring.html b/docs/docs_output/how-tos/celery-monitoring.html new file mode 100644 index 00000000..108ef161 --- /dev/null +++ b/docs/docs_output/how-tos/celery-monitoring.html @@ -0,0 +1,761 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Celery monitoring +

+

+ For authoritative, more detailed information, see + + celery's monitoring guide + + . +

+ + +

+ UI +

+

+ We run the flower UI at + + https://flower.{cluster_domain} + + . You can see executed +tasks by clicking on tasks, or navigating to + + https://flower.{cluster_domain}/tasks + + . +You'll want to sort tasks to see the latest Started or Received at the top. +You can filter by task using the Search input. The UI doesn't refresh live. +Increasing the number of shown entries can be helpful. +

+ + +

+ CLI +

+

+ From a core-api pod ( + + kcc exec -it $api_pod_name -- bash + + ) you can invoke +celery inspect. One useful thing to do is check the stats. +

+
celery -A datacoves inspect stats
+
+

+ Here's an excerpt from the output. +

+
...
+        "total": {
+            "billing.tasks.inform_billing_events": 113,
+            "billing.tasks.tally_account_resource_usage": 1,
+            "billing.tasks.tally_resource_usage": 1,
+            "celery.backend_cleanup": 1,
+            "clusters.workspace.sync_task": 1211,
+            "iam.tasks.clear_tokens": 1,
+            "iam.tasks.remove_missing_user_groups": 1,
+            "notifications.tasks.send_slack_notification": 7,
+            "projects.tasks.delete_unused_project_keys": 1,
+            "projects.tasks.remove_unused_environments": 1,
+            "projects.tasks.remove_unused_user_volumes": 1,
+            "projects.tasks.stop_sharing_codeservers": 38,
+            "projects.tasks.turn_off_unused_workspaces": 1134
+        },
+        "uptime": 68132
+...
+
+

+ The uptime is 68132 seconds, and the sync_task has run 1211 times, so there's +been one run every 56 seconds on average. +

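+ A couple of other inspect subcommands can be handy from the same pod (a sketch, using the same app name as above): +
+# Tasks currently being executed
+celery -A datacoves inspect active
+
+# Tasks claimed by workers but not yet started
+celery -A datacoves inspect reserved
+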
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/choose-ec2-nodes.html b/docs/docs_output/how-tos/choose-ec2-nodes.html new file mode 100644 index 00000000..1046e77e --- /dev/null +++ b/docs/docs_output/how-tos/choose-ec2-nodes.html @@ -0,0 +1,749 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Choosing an ec2 instance type and estimating pods per node +

+

+ + AWS docs. + +

+ + +

+ Pod limit from network constraints +

+

+ Every pod must have an IP. EC2 instances have a maximum number of IPs, which +limits the number of pods per node. + + source + +

+

+ With CNI version 1.9 or higher and nitro instances, + + the pod limit can be increased + + . +For example: +

+
 $ ./max-pods-calculator.sh --instance-type m5.large --cni-version 1.9.0
+29
+
+ $ ./max-pods-calculator.sh --instance-type m5.large --cni-version 1.9.0 --cni-prefix-delegation-enabled
+110
+
+# For ensembledev.apps.jnj.com:
+$ ./max-pods-calculator.sh --instance-type m5.4xlarge --cni-version 1.7.1
+110
+
+

+ + List of ENI and IP limits per instance type + + . +

+ + +

+ Pod limit from volume attachment limits +

+

+ Currently some of our pods (code-server) require EBS volumes. EC2 instances have +a maximum number of volumes that can be attached. For "most" nitro instances, the +sum of ENIs, volume attachments and instance store volumes must be less than 28. + + source + + . Volume attachments seem capped at 26 because each mount +point uses a letter of the alphabet. +

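+ To see how many pods a running node actually advertises, you can query its allocatable pod count (a sketch; the node name is a placeholder): +
+kubectl get node <node-name> -o jsonpath='{.status.allocatable.pods}'
+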
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/codeserver-images.html b/docs/docs_output/how-tos/codeserver-images.html new file mode 100644 index 00000000..3a490845 --- /dev/null +++ b/docs/docs_output/how-tos/codeserver-images.html @@ -0,0 +1,747 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ +

+ Check the versions, but these are the standard Datacoves VS Code extensions: +

+

+ SQLFluff is a SQL linter with dbt support +https://datacoves-vs-code-images.s3.amazonaws.com/dorzey.vscode-sqlfluff-3.2.0.vsix +

+

+ This extension adds grid (Excel-like) editing for CSV files +https://datacoves-vs-code-images.s3.amazonaws.com/janisdd.vscode-edit-csv-0.10.0.vsix +

+

+ Standard VS Code Python extension +https://datacoves-vs-code-images.s3.amazonaws.com/ms-python.python-2024.14.1.vsix +

+

+ This adds yml validations +https://datacoves-vs-code-images.s3.amazonaws.com/redhat.vscode-yaml-1.15.0.vsix +

+

+ This adds "short cuts" to VS Code like the "run current model" and "more.." button +https://datacoves-vs-code-images.s3.amazonaws.com/RobertOstermann.better-status-bar-1.0.9.vsix +

+

+ This adds Jinja support, I think it is dbt-jinja +https://datacoves-vs-code-images.s3.amazonaws.com/samuelcolvin.jinjahtml-0.20.0.vsix +

+

+ This adds items to the file context menu like "Duplicate" +https://datacoves-vs-code-images.s3.amazonaws.com/sleistner.vscode-fileutils-3.10.3.vsix +

+

+ This adds spell checking +https://datacoves-vs-code-images.s3.amazonaws.com/streetsidesoftware.code-spell-checker-3.0.1.vsix +

+

+ This is our Power User Extension that adds things like query preview and near real time linting +https://datacoves-vs-code-images.s3.amazonaws.com/vscode-datacoves-power-user-0.9.16.vsix +

+

+ Python Ruff linter, main use case is to show vars and imports not being used in a .py file +https://datacoves-vs-code-images.s3.amazonaws.com/charliermarsh.ruff-2024.56.0.vsix +

+

+ This adds colors to each column of a CSV file +https://datacoves-vs-code-images.s3.amazonaws.com/mechatroner.rainbow-csv-3.3.0.vsix +

+

+ This is part of the Datacoves install for Snowflake Envs +https://datacoves-vs-code-images.s3.amazonaws.com/snowflake.snowflake-vsc-1.10.5.vsix +

+

+ SQLTools: I can't find where this came from on GitHub, and it is no longer in Orrum since I deleted it. +It is used on non-Snowflake envs like Artemis +

+

+ This is a ChatGPT extension that is NOT our default, but has been added in a few places, like Orrum and Cold Bore. Datacoves co-pilot will make this obsolete +https://datacoves-vs-code-images.s3.amazonaws.com/timkmecl.chatgpt-1.1.2.vsix +

+
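+ If you ever need to install one of these .vsix files by hand inside a code-server pod, it can be done from the CLI (a sketch using one of the URLs listed above): +
+curl -LO https://datacoves-vs-code-images.s3.amazonaws.com/dorzey.vscode-sqlfluff-3.2.0.vsix
+code-server --install-extension dorzey.vscode-sqlfluff-3.2.0.vsix
+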
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/connect-to-kenvue-cluster-using-a-bastion.html b/docs/docs_output/how-tos/connect-to-kenvue-cluster-using-a-bastion.html new file mode 100644 index 00000000..8ace2cd1 --- /dev/null +++ b/docs/docs_output/how-tos/connect-to-kenvue-cluster-using-a-bastion.html @@ -0,0 +1,782 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to connect to kenvue cluster using a bastion +

+ + +

+ SSH to bastion +

+

+ ssh + + @AWSWEXNVAL0001.kenvue.com + +

+ + +

+ Set up your user environment +

+

+ Install kubectl and aws-iam-authenticator +

+
mkdir bin
+cd bin
+curl -Lo aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.5.9/aws-iam-authenticator_0.5.9_linux_amd64
+chmod +x aws-iam-authenticator
+
+cd ..
+curl -Lo kuberlr.tar.gz https://github.com/flavio/kuberlr/releases/download/v0.4.2/kuberlr_0.4.2_linux_amd64.tar.gz
+tar -xzvf kuberlr.tar.gz
+
+cd kuberlr_0.4.2_linux_amd64/
+mv kuberlr ../bin/
+cd ../bin
+ln -s kuberlr kubectl
+cd ..
+
+ + +

+ Configure your ~/.kube/config +

+
mkdir .kube
+cat << EoF > .kube/config2
+apiVersion: v1
+clusters:
+- cluster:
+    server: https://BD0F1A58014FCF446B668A876EE7DF2A.gr7.us-east-1.eks.amazonaws.com
+    certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1UQXlOVEV4TlRNMU1Gb1hEVE15TVRBeU1qRXhOVE0xTUZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT2JpCmFhOUFvSDVlWGpMeFdnQzBONE5JUHVQSVptNmpLNmxBM29sTVAwUHYyd1hlalphcEFsVnFOWVdxcHl3aCtZZm8KT1lLR1Nuc2hPdE9DbnVyU094SVhoY1BnR1ZmN1REVlZGbU04WW5KSzBmOHdLWmxLdDNIYU9oWFJkekNZYkJoMgoydnpZSGx0ZGREbHkvTHpwaWpNQlpNRHY1UUtkeEhNSEF0aUd6aG4xS2xvT2xkRGozV1lpV1VJV0ladzZheWV2CnNhYm1Rd3A1REJwQjBVN3V2bEdMd1RUQ3RZc3NhdnI2dDZ6MWtzNHhNUUMxVTlONUlHV0UxdEUrZGZwMmZzWDYKZ3d1c0tEOGNESkFiVmFrL2lwK3pkcXRxRnJHOVFNeDBEelpQYzRtU1dnVDZyVXZjbTlBbTlrMVNsSXc5ODlGRApHelh6bGxQcXZySWNnU1RWSW9jQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZLNnJEeXBRK3VReGgxWU8zS0JKbmthYU1TNUdNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFCdk52clZjRjFaZ1FDMzNpbDZrR0gzcHJJN3RWRmcvOTF3UVNZZkM2SFM2cWRiVERucwpNYXhoeEYvblZzbFEyKzRmN0UxVUZodUdsOUdUZlVvS2FiQzB1cWx6bUpQaDJVUXJRZ3hZQnd3eGxTOSszcHJNCnlUOGZ5M29uM21jaWR0azZlSllIcm5wZS9QZnlWN1J5eUhva0pVVGIwcWFVakxoMVZHVFoyRmJLK0ZjeG50SHcKdWJ4bnlSMHZlcGExdDFoOVljNDFJYnFzUGRBMVFDZVYvR1hNdWN4Z0U4bUd1VFZQQlU1MEdYbG1qWnRZVjg5dgp3TVpYTVVobzNmakdQNVVnMnlFTmtXaW9Ra2hqUkRMRUZGQXpZUzMrSU5TWnAwMklBUTRRNkNSYnJ0Vmc5ZDFrCkY4d1FzaytJUXUrMnE3T25WOUs5cUdYeXdrakNSd0ZTV1N2UwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
+  name: kubernetes
+contexts:
+- context:
+    cluster: kubernetes
+    user: aws
+  name: aws
+- context:
+    cluster: kubernetes
+    user: aduser
+  name: user
+current-context: aws
+kind: Config
+preferences: {}
+users:
+- name: aws
+  user:
+    exec:
+      apiVersion: client.authentication.k8s.io/v1beta1
+      command: aws-iam-authenticator
+      args:
+        - "token"
+        - "-i"
+        - "itx-wcr-datacove-development"
+        - "-r"
+        - "arn:aws:iam::551241293703:role/itx/service/EKS/VPCxEKSRole"
+- name: aduser
+  user:
+    auth-provider:
+      config:
+        apiserver-id: "22f9d484-b818-4b21-a278-00b264446505"
+        client-id: "22f9d484-b818-4b21-a278-00b264446505"
+        environment: AzurePublicCloud
+        tenant-id: "7ba64ac2-8a2b-417e-9b8f-fcf8238f2a56"
+      name: azure
+EoF
+
+ + +

+ Connect to cluster +

+
kubectl get nodes
+
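+ Note that the kubeconfig above was written to ~/.kube/config2, so you may need to point kubectl at it (and put the ~/bin tools on your PATH) before the command above works, for example: +
+export PATH=$HOME/bin:$PATH
+export KUBECONFIG=$HOME/.kube/config2
+kubectl get nodes
+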
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/custom-dns.html b/docs/docs_output/how-tos/custom-dns.html new file mode 100644 index 00000000..b60bd5a2 --- /dev/null +++ b/docs/docs_output/how-tos/custom-dns.html @@ -0,0 +1,746 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ About this Documentation +

+

+ Some customers (like Orrum) require a custom internal DNS. This will require adding a new coredns custom config map: +

+
apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns-custom
+  namespace: kube-system
+data:
+  sftp.orrum.com.server: |
+    sftp.orrum.com:53 {
+        forward . 172.31.150.10 172.31.160.20
+    }
+
+

+ Change 'sftp.orrum.com' to whatever pattern needs to go to the custom DNS, and the IP addresses to those of the DNS servers that resolve the address. +

+

+ Then you can patch the coredns deployment: +

+
kubectl -n kube-system patch deployment coredns \
+  --type='json' \
+  -p='[
+    {
+      "op": "add",
+      "path": "/spec/template/spec/volumes/-",
+      "value": {
+        "name": "custom-coredns",
+        "configMap": {
+          "name": "coredns-custom"
+        }
+      }
+    },
+    {
+      "op": "add",
+      "path": "/spec/template/spec/containers/0/volumeMounts/-",
+      "value": {
+        "name": "custom-coredns.server",
+        "mountPath": "/etc/coredns/custom"
+      }
+    }
+  ]'
+
+

+ Then restart the deployment: +

+
kubectl rollout restart deployment coredns -n kube-system
+
+

+ And test with nslookup: +

+
kubectl -n core exec -ti workbench-c6599969b-k4p5w -- nslookup sftp.orrum.com
+
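+ If the lookup doesn't resolve, checking the coredns logs for configuration errors is a reasonable next step (a sketch): +
+kubectl -n kube-system logs -l k8s-app=kube-dns --tail=50
+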
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/datacoves-versioning.html b/docs/docs_output/how-tos/datacoves-versioning.html new file mode 100644 index 00000000..ecb845cc --- /dev/null +++ b/docs/docs_output/how-tos/datacoves-versioning.html @@ -0,0 +1,832 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Datacoves versioning +

+

+ We use + + semantic versioning + + in all our images, and datacoves releases. +

+

+ + MAJOR.MINOR.PATCH + +

+

+ where + + MAJOR.MINOR + + are read from + + .version.yaml + + and used every time a new image is +pushed to docker repository and + + PATCH + + is autogenerated (timestamp). +

+ + +

+ Our criteria +

+ + +

+ When do we bump the + + MAJOR + + version? +

+

+ When we make incompatible changes or we introduce compatible changes but deprecate features: +

+
    +
  • + Any python library upgrade (including dbt) that requires changes in the customer's analytics(dbt) git repo +
  • +
  • + Airbyte, Airflow, DataHub, Superset upgrades that require reconfiguration +
  • +
  • + Datacoves core changes that require human intervention +
  • +
  • + Airbyte, Airflow, DataHub, Superset upgrades that do not require reconfiguration, but where several features are being deprecated +
  • +
+ + +

+ When should we bump the + + MINOR + + version? +

+
    +
  • + When we make compatible changes, such as new features or upgrade dependencies +
  • +
  • + Patch version changes to dbt e.g. 1.8.3 to 1.8.5 +
  • +
  • + Compatible updates to dbt e.g. 1.7.x to 1.8.x +
  • +
  • + Compatible update to Airbyte, Airflow, DataHub, Superset that do not require reconfiguration +
  • +
+ + +

+ Everything else is a + + PATCH + +

+
    +
  • + Bug fixes, performance enhancements +
  • +
+ + +

+ Images tags +

+

+ Images are pushed with the following tags: +

+
    +
  • + MAJOR +
  • +
  • + MAJOR.MINOR +
  • +
  • + MAJOR.MINOR.PATCH +
  • +
  • + MAJOR.MINOR.PATCH-\<commit sha> +
  • +
  • + latest +
  • +
+

+ CI servers that eventually use datacoves images could reference any of them, depending on how specific they need to be. +

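+ As an illustration (the version number, timestamp and commit sha below are hypothetical), a single push of a 3.1 image would produce tags like: +
+3
+3.1
+3.1.1718294400
+3.1.1718294400-abc1234
+latest
+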
+ + +

+ Releases +

+

+ Releases follow the same versioning criteria, they are generated by running the + + ./cli.py generate_release + + command, or by triggering the + + Generate Release + + GitHub workflow. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/debug-airflow-workers.html b/docs/docs_output/how-tos/debug-airflow-workers.html new file mode 100644 index 00000000..87a036d7 --- /dev/null +++ b/docs/docs_output/how-tos/debug-airflow-workers.html @@ -0,0 +1,904 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Debug Airflow Workers +

+ + +

+ How to review if there are errors in git-sync/s3-sync containers? +

+

+ We have already enabled the functionality in + + git-sync + + to retry a maximum of three times. If the synchronization with + + git-sync + + or + + s3-sync + + is not successful, the worker will fail; therefore the Airflow task will also fail. +

+

+ To get the logs from + + git-sync + + or + + s3-sync + + we need to filter by namespace and by container. Below are some examples of how to do it. +

+
    +
  1. + Go to + + Grafana + + e.g. + + https://grafana.<domain> + +
  2. +
  3. +

    + Go to + + Explore + + select + + Loki + + datasource and perform the query with the following filters: +

    +
  4. +
  5. +

    + + Namespace + + = + + dcw-my-slug-environment + +

    +
  6. +
  7. + + Container + + =~ + + git-sync + + / + + s3-sync + +
  8. +
+

+ Examples: +

+
# git-sync
+{namespace="dcw-dnr240", container="git-sync"} |= ``  
+
+# s3-sync
+{namespace="dcw-dnr240", container="s3-sync"} |= ``  
+
+

+ Find log for git-sync or s3-sync +

+ + +

+ How to get Airflow workers? +

+
    +
  1. + Go to + + Grafana + + e.g. + + https://grafana.<domain> + +
  2. +
  3. +

    + Go to + + Explore + + select + + Loki + + datasource and perform the query with the following filters: +

    +
  4. +
  5. +

    + + Namespace + + = + + dcw-my-slug-environment + +

    +
  6. +
  7. + + Pod + + =~ + + my-slug-environment-airflow-scheduler.* + +
  8. +
  9. + + Line contains + + |= + + my-task + +
  10. +
+

+ + Note: Remember that you have to adjust the date and time parameters depending on the search you want to perform. + +

+

+ E.g.: +

+
{namespace="dcw-prd001", pod=~"prd001-airflow-scheduler.*"} |= `t_id_MDM_extraction_V_ENS2_SALES_ADJUSTMENTS_streamsets`
+
+

+ Find pod by task +

+
    +
  1. + Copy the pod name +
  2. +
  3. +

    + Go to + + Explore + + select + + Loki + + and perform the query with the following filters: +

    +
  4. +
  5. +

    + + Namespace + + = + + dcw-my-slug-environment + +

    +
  6. +
  7. + + Pod + + = + + pod-name + +
  8. +
+

+ E.g.: +

+
{namespace="dcw-prd001", pod="emeaelmdmprdtidmdmextractionve-295567f106ff46139ad4edf24e52fc31"} |= ``
+
+

+ Find pod by name +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/debug-dbt-errors-when-return-code-is-not-zero.html b/docs/docs_output/how-tos/debug-dbt-errors-when-return-code-is-not-zero.html new file mode 100644 index 00000000..8b4d081a --- /dev/null +++ b/docs/docs_output/how-tos/debug-dbt-errors-when-return-code-is-not-zero.html @@ -0,0 +1,737 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to debug dbt on production environments, i.e. Airflow? +

+

+ Sometimes when you run a dbt command on the command line, e.g. + + dbt deps + + or + + dbt compile + + , there are silent errors and you just get an error code > 0. +

+

+ To debug it, you should run it programmatically using Python: +

+ + +

+ Run python in the command line +

+
$ python
+
+ + +

+ Run the desired command right in the python console +

+
from dbt.cli.main import dbtRunner, dbtRunnerResult
+
+# initialize
+dbt = dbtRunner()
+
+# create CLI args as a list of strings
+cli_args = ["deps"]
+
+# run the command
+res: dbtRunnerResult = dbt.invoke(cli_args)
+
+# inspect the results
+for r in res.result:
+    print(f"{r.node.name}: {r.status}")
+
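+ For commands like deps that don't return per-node results, it is usually more useful to inspect the success flag and the captured exception on the result object (a small sketch continuing the snippet above): +
+# success / exception are also available on dbtRunnerResult
+if not res.success:
+    print(res.exception)
+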
+

+ To know more, see https://docs.getdbt.com/reference/programmatic-invocations. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/grafana-grant-permisions.html b/docs/docs_output/how-tos/grafana-grant-permisions.html new file mode 100644 index 00000000..74564a02 --- /dev/null +++ b/docs/docs_output/how-tos/grafana-grant-permisions.html @@ -0,0 +1,728 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/grafana-loki-storage-config-providers.html b/docs/docs_output/how-tos/grafana-loki-storage-config-providers.html new file mode 100644 index 00000000..87952b5c --- /dev/null +++ b/docs/docs_output/how-tos/grafana-loki-storage-config-providers.html @@ -0,0 +1,1075 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Grafana Loki Storage +

+ + +

+ Providers +

+ + + +

+ AWS S3 +

+ + +

+ Permission +

+

+ Limited: List, Read, Write +

+
{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:GetObjectVersion",
+        "s3:DeleteObject",
+        "s3:DeleteObjectVersion"
+      ],
+      "Resource": ".../*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": "s3:ListBucket",
+      "Resource": "..."
+    }
+  ]
+}
+
+ + +

+ Create and Configure Life Cycle +

+
    +
  1. + Find and select + + S3 Services + + . +
  2. +
  3. + Click on + + Create Bucket + + . +
  4. +
  5. + In + + General configuration + + you must choose the region and the name of the bucket. Other values can be defaulted. +
  6. +
  7. + We need to create two + + Lifecycle rules + + to rotate our logs. +
  8. +
  9. + Select the new Bucket and then select + + Management + + tab. +
  10. +
  11. + Click + + Create lifecycle rule + + . +
  12. +
  13. + In + + Lifecycle rule configuration + + fill in the name, e.g.: + + Delete all fake objects after 30 days + + . +
  14. +
  15. + In + + Prefix + + fill in + + fake/ + +
  16. +
  17. + Under + + Lifecycle rule actions + + check + + Expire current versions of objects + + . +
  18. +
  19. + In + + Days after object creation + + enter + + 30 + +
  20. +
  21. + Save changes. +
  22. +
  23. + In + + Lifecycle rule configuration + + fill in the name, e.g.: + + Delete all index objects after 30 days + +
  24. +
  25. + In + + Prefix + + fill in + + index/ + +
  26. +
  27. + Under + + Lifecycle rule actions + + check + + Expire current versions of objects + + . +
  28. +
  29. + In + + Days after object creation + + enter + + 30 + +
  30. +
  31. + Save changes. +
  32. +
+

+ + We must have two rules. + +

+
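+ If you prefer the CLI over the console, an equivalent lifecycle configuration can be applied in a single call (a sketch; the bucket name is a placeholder): +
+cat > lifecycle.json <<'EOF'
+{
+  "Rules": [
+    {"ID": "Delete all fake objects after 30 days", "Filter": {"Prefix": "fake/"}, "Status": "Enabled", "Expiration": {"Days": 30}},
+    {"ID": "Delete all index objects after 30 days", "Filter": {"Prefix": "index/"}, "Status": "Enabled", "Expiration": {"Days": 30}}
+  ]
+}
+EOF
+aws s3api put-bucket-lifecycle-configuration --bucket <bucket-name> --lifecycle-configuration file://lifecycle.json
+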

+ Lifecycle rule configuration +

+

+ + Example of a rule. + +

+

+ Lifecycle rule configuration +

+

+ Lifecycle rule configuration +

+ + +

+ Azure Blob Storage +

+ + +

+ Create and configure Azure Blob Storage +

+
    +
  1. + Create new resource + + Storage account + + . +
  2. +
  3. + Select your + + Subscription + + and + + Resource group + + . +
  4. +
  5. + Complete the + + Storage account name + + . +
  6. +
  7. + Click + + Review + + (Other values can be defaulted). +
  8. +
  9. + Click + + Create + + (Other values can be defaulted). +
  10. +
+

+ Azure blob storage account +

+
    +
  1. + Select your new + + Storage account + + . +
  2. +
  3. + Click on + + Containers + + and add new container. +
  4. +
+

+ Azure blob storage storage container +

+

+ Azure blob storage storage container +

+
    +
  1. + Select + + Lifecycle management + + and + + Add a rule + + to create a new rule to rotate our logs. +
  2. +
+

+ Azure blob storage storage container +

+
    +
  1. + On the + + Details + + tab we must complete the name (Delete all objects after 30 days) and select + + Limit blobs with filter + + . +
  2. +
+

+ Azure blob storage storage lifecycle +

+
    +
  1. + On the + + Filter set + + tab we must add two + + Blob prefixes + + : + + <container-name>/fake/ + + and + + <container-name>/index/ + + . +
  2. +
+

+ Azure blob storage storage lifecycle +

+
    +
  1. + Click + + Create + +
  2. +
+ + +

+ Get configuration data +

+
    +
  1. + + Account name + + is the name of the + + storage account + + . +
  2. +
  3. + Click on + + Account key + + (Key1) +
  4. +
+

+ Azure blob storage account key +

+
    +
  1. + Select your + + Container + + and then + + Properties + +
  2. +
+

+ Container properties +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/grafana-loki-storage-config.html b/docs/docs_output/how-tos/grafana-loki-storage-config.html new file mode 100644 index 00000000..a8964ee1 --- /dev/null +++ b/docs/docs_output/how-tos/grafana-loki-storage-config.html @@ -0,0 +1,776 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Grafana Loki Storage Configuration +

+

+ There are three different providers to configure + + Loki + + storage: +

+
    +
  • + AWS S3 +
  • +
  • + Azure Blob Storage +
  • +
  • + Minio (Local development) +
  • +
+ + +

+ Notes +

+
    +
  • + Minio is not responsible for log rotation; the log lifecycle must be configured in your provider. +
  • +
  • + How to configure the provider? + + here + +
  • +
+

+ To configure the cluster you must add the configuration to the configuration repository as a secret in + + <domain>/cluster-params.secret.yaml + + , for example for our local environment: + + datacoveslocal.com/cluster-params.secret.yaml + +

+ + +

+ Minio (Local development) +

+
grafana:
+  ...
+  loki:
+    provider: minio
+    password: ...
+
+ + +

+ AWS S3 +

+
grafana:
+  ...
+  loki:
+    provider: aws
+    region: <us-east-1>
+    access_key: ...
+    secret_key: ...
+    bucket: <bucket-name>
+
+ + +

+ Azure Blob Storage +

+
grafana:
+  ...
+  loki:
+    provider: azure
+    account_name: ...
+    account_key: ...
+    container_name: <container-name>
+    endpoint_suffix: <blob.core.windows.net>
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/hotfix.html b/docs/docs_output/how-tos/hotfix.html new file mode 100644 index 00000000..b48b8da5 --- /dev/null +++ b/docs/docs_output/how-tos/hotfix.html @@ -0,0 +1,761 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to Create a Hotfix +

+

+ A hotfix is defined as doing a targeted fix to an existing release. The idea behind a hotfix is to do the absolute minimum change to correct a high priority issue in a live release. +

+

+ To create a hotfix, first create a branch from the release tag you wish to hotfix. Let's say you're hotfixing release 'TAG_NAME'. You would run the following commands: +

+
git fetch --all --tags
+git checkout -b BRANCH_NAME refs/tags/TAG_NAME
+
+

+ You will now have a branch that is a copy of the release tag. You can either do your hotfix work directly on that branch and merge it to main later, or you can use + + git cherry-pick + + to pick commits from the main branch onto your new branch. If you need to use cherry-pick and you don't know how, that is a larger topic than I want to cover here; Stephen can help you directly with that. +

+
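+ For reference, picking a single fix commit from main onto the hotfix branch looks roughly like this (the sha is a placeholder): +
+git checkout BRANCH_NAME
+git cherry-pick <commit-sha-from-main>
+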

+ Once you have done your work, you should + + commit + + to your branch and then compare your branch to the original tag. This will make sure you only changed what was needed: +

+
git diff BRANCH_NAME..refs/tags/TAG_NAME
+
+

+ This command + + is very important if you cherry-pick + + to make sure you don't accidentally bring additional features or code that you do not intend to. However, it is good practice to review all code going into a hotfix very carefully. +

+

+ Once you are certain your hotfix is good, + + push + + it to the git repository. Now you're ready to build a hotfix release with cli.py. Do the following command: +

+
./cli.py generate_hotfix
+
+

+ It will first show you + + git status + + to make sure your code is committed. Make sure there are no extra files or anything you don't want built into the release docker image present in your code tree. +

+

+ After you confirm, it will ask you which release you are making a hotfix from. This release must already be present in your + + releases/ + + directory; if it is not, download the release with + + ./cli.py download_releases + + or download the appropriate manifest directly from github. +

+

+ Then, it will ask you which images you wish to build. Select one or more images to build, or none if you are changing another dependency. +

+

+ After that, it will ask you if you want to change the version of any other image that is in the release. You can select none if you only want to build new images and you don't need to change any other dependencies. +

+

+ Finally, it will build your release and push it up as a draft in github. From that point, it is a normal release and you can take it through the normal process to get it installed. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/how-to-create-a-ssl-certificate.html b/docs/docs_output/how-tos/how-to-create-a-ssl-certificate.html new file mode 100644 index 00000000..1569668f --- /dev/null +++ b/docs/docs_output/how-tos/how-to-create-a-ssl-certificate.html @@ -0,0 +1,786 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to create an SSL certificate +

+
    +
  1. +

    + Install + + acme.sh + +

    +
  2. +
  3. +

    + Configure the + + cloudflare API token + + (getting + + CF_Key + + and + + CF_Email + + from 1Password). +

    +
  4. +
  5. +

    + Run: +

    +
  6. +
+
# Let's Encrypt issuer
+# https://github.com/acmesh-official/acme.sh/wiki/Server
+acme.sh --issue --server letsencrypt --dns dns_cf -d <DOMAIN> --debug 2
+
+# then
+acme.sh --issue --server letsencrypt --dns dns_cf -d '*.<DOMAIN>' --debug 2
+
+
    +
  1. + Get certificate information (Optional) +
  2. +
+
openssl x509 -text -noout -in <cert>
+
+
    +
  1. +

+ Copy certificates +

    +
  2. +
  3. +

    + Use + + <DOMAIN>/fullchain.cer + + and + + <DOMAIN>/<DOMAIN>.key + + as the root certificate and private key. Usually copied then to + + base/root.cer + + and + + base/root.key + + . +

    +
  4. +
  5. + Also, use + + *.<DOMAIN>/fullchain.cer + + and + + *.<DOMAIN>/<DOMAIN>.key + + as the wildcard certificate and private key. Usually copied then to + + base/wildcard.cer + + and + + base/wildcard.key + + . +
  6. +
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/index.html b/docs/docs_output/how-tos/index.html new file mode 100644 index 00000000..31d10174 --- /dev/null +++ b/docs/docs_output/how-tos/index.html @@ -0,0 +1,688 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/install-python-reqs-on-jnj-bastion.html b/docs/docs_output/how-tos/install-python-reqs-on-jnj-bastion.html new file mode 100644 index 00000000..e0fbf7a9 --- /dev/null +++ b/docs/docs_output/how-tos/install-python-reqs-on-jnj-bastion.html @@ -0,0 +1,700 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Install python requirements on bastion in JNJ +

+
wget --no-check-certificate https://bootstrap.pypa.io/pip/3.6/get-pip.py && python3 get-pip.py --user
+
+

+ Then, cd into the datacoves_deployment cloned repo folder, and run: +

+
pip install -r requirements.txt
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/list-code-server-pods-processes.html b/docs/docs_output/how-tos/list-code-server-pods-processes.html new file mode 100644 index 00000000..47384d8a --- /dev/null +++ b/docs/docs_output/how-tos/list-code-server-pods-processes.html @@ -0,0 +1,700 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ List python processes running on certain namespace's code server pods +

+
#!/bin/bash
+ns="dcw-dev001
+pods=$(kubectl -n $ns get pods | grep code-server | awk '{print $1, $8}')
+for pod in $pods; do
+  kubectl -n $ns exec -ti $pod -- bash -c 'ps auxwf' | grep python
+done
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/make-and-install-a-release.html b/docs/docs_output/how-tos/make-and-install-a-release.html new file mode 100644 index 00000000..77c36229 --- /dev/null +++ b/docs/docs_output/how-tos/make-and-install-a-release.html @@ -0,0 +1,780 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Make a new release +

+

+ To make a new release, from your development machine: +

+
cluster_domain=ensembletest.apps.jnj.com
+
+# Generate a new release.
+git checkout main
+git pull
+
+#  Check that images are properly created in Github Actions
+./cli.py generate_release
+release= # The name of the release just generated.
+
+# [If release is targeted to a submodule customer]
+#   Check if there's any config change requirement
+./cli.py combined_release_notes     # Inspect the output to check for configuration changes
+
+# Update the cluster configuration to reference the new release.
+./cli.py set_release
+cd config/$cluster_domain/
+git secret reveal -f # Only required if you modified secrets.
+change configuration as required # Only required if you modified secrets.
+git secret hide      # Only required if you modified secrets.
+git add -A
+git diff --cached    # Review what will be commited.
+git commit
+git push
+
+# Commit and push the changes to datacoves.
+cd ../..
+git add -A
+git diff --cached
+git commit
+git push
+
+ + +

+ Apply the release to a cluster +

+ + +

+ Localhost +

+
./cli.py install
+
+ + +

+ JNJ +

+

+ For jnj there's a git repository, datacoves_deployment, that mirrors the structure of +the datacoves repo but only contains scripts and configuration, not sources. +

+

+ To deploy first update the mirror: +

+
# Clone if needed.
+mkdir -p ../jnj/asx-ahrx/datacoves_deployment
+git clone ssh://git@sourcecode.jnj.com:3268/asx-ahrx/datacoves_deployment.git ../jnj/asx-ahrx/datacoves_deployment
+
+# Rsync the installer files into the datacoves_deployment repo
+./cli.py rsync_installer ../jnj/asx-ahrx/datacoves_deployment/
+
+# Point the config submodule to the latest version.
+cd config/$cluster_domain/
+git pull
+cd ../..
+
+# Commit the changes.
+git add -A
+git diff --cached
+git commit
+
+

+ SSH into a jnj machine with kubectl access to the cluster. Then follow + + datacoves_deployment + + 's + + documentation + + to run the installation scripts. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/manage-profiles-and-image-sets.html b/docs/docs_output/how-tos/manage-profiles-and-image-sets.html new file mode 100644 index 00000000..7cf634ab --- /dev/null +++ b/docs/docs_output/how-tos/manage-profiles-and-image-sets.html @@ -0,0 +1,821 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Managing profiles and image sets +

+ + +

+ How to create and use a profile + image set? +

+ + +

+ 1. Create profile +

+

+ A profile is used to create a reusable preconfigured environment. +

+
    +
  1. + Navigate to + + profiles admin page + + and create a new one clicking on "Add Profile". +
  2. +
  3. + Review the checkboxes and uncheck the ones that are not appropriate; you might like to keep them all checked as suggested. +
  4. +
  5. + Add profile files accordingly. You might like to copy the exact same profile files configured on the + + default profile + + . +
  6. +
+ + +

+ 2. Create image set +

+

+ Image sets are associated to profiles and they are used to build the images that will end up being used by code-server and/or airflow. +

+
    +
  1. + Navigate to the + + Image set admin page + + and click on "Create new image set". +
  2. +
  3. + Choose the profile you just created in + + Profile + + . +
  4. +
  5. + Choose the release the new images are going to be based on, typically the latest release. +
  6. +
  7. + Set the common python requirements for both airflow and code-server images in the + + Python requirements + + field. Take a look at the help text under the field. +
  8. +
  9. + Set the specific python requirements for airflow or code server in the fields + + Airflow requirements + + or + + Code server requirements + + . +
  10. +
  11. + Finally, configure the extensions you need installed in code-server by adding urls to the .vsix files in the + + Code server extensions + + field. +
  12. +
  13. + Hit "Save and continue editing". +
  14. +
  15. + Click on "Build image set" button in the top right corner of the form. A background process will be triggered to build the images. +
  16. +
  17. + Keep refreshing the page every minute until the field + + Images + + gets populated with the final built images. +
  18. +
+ + +

+ 3. Start using your profile +

+

+ Once your profile and image set are ready, you need to edit the environment you want to change and set the corresponding + + profile + + in that field. Environments are edited + + here + + . +

+

+ Change environment profile +

+ + +

+ 4. Reload the workbench page +

+

+ That's all, reload the page and don't forget to prepare your + + mate + + to enjoy your analytics journey even more ;) +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/move-a-gpg-secret-key.html b/docs/docs_output/how-tos/move-a-gpg-secret-key.html new file mode 100644 index 00000000..b0462c86 --- /dev/null +++ b/docs/docs_output/how-tos/move-a-gpg-secret-key.html @@ -0,0 +1,714 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to move a gpg secret key +

+

+ You should not reuse private gpg keys without thinking. However, it is more +convenient to have a single private key for your jnj email that is in all the +git secret keyrings of all the cluster config repos that you have access to. +

+

+ An easy way to transfer a key to a new installation server is to copy and paste +its base64: +

+
# From the machine that already has the key:
+gpg --list-secret-keys
+gpg --export-secret-key youremail@its.jnj.com | base64
+# Copy the output.
+
+
# From the installation machine:
+cat | base64 -d > key.asc
+# Paste and hit control D.
+gpg --import key.asc
+gpg --list-secret-keys
+rm key.asc
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/onboard-a-new-project-on-datacoves.html b/docs/docs_output/how-tos/onboard-a-new-project-on-datacoves.html new file mode 100644 index 00000000..05032f11 --- /dev/null +++ b/docs/docs_output/how-tos/onboard-a-new-project-on-datacoves.html @@ -0,0 +1,849 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ 1. Create service accounts on snowflake (manually). +

+
    +
  • + svc_datacoves: to change user private key +
  • +
  • + svc_orchestration: airflow jobs +
  • +
  • + svc_loader: airbyte/fivetran jobs +
  • +
  • + svc_continuous_integration: CI jobs +
  • +
  • + svc_business_intelligence: BI tool connection (optional) +
  • +
  • + svc_business_intelligence_pii: BI tool connection for PII data (optional) +
  • +
+ + +

+ 2. Create user accounts on snowflake (manually) +

+ + +

+ 3. New project on appdevtools (on JnJ): +

+
    +
  • + Bitbucket +
  • +
  • + Jenkins +
  • +
  • + Confluence +
  • +
+ + +

+ 4. Configure git service account access to repo +

+ + +

+ 5. Add SQL hook and template to set users private key on snowflake +

+ + +

+ 6. Create git repo structure using balboa repo as a reference: +

+
    +
  • + load +
  • +
  • + orchestrate +
  • +
  • + automate +
  • +
  • + dbt +
  • +
  • + profiles.yml +
  • +
  • + sample_blue_green.py +
  • +
  • + docs +
  • +
  • + secure +
  • +
  • + .gitignore +
  • +
+

+ Depending on CI: +

+
    +
  • + .github +
  • +
  • + .gitlab-ci.yml +
  • +
  • + Jenkinsfile +
  • +
+

+ CI job deploy to prod that: +

+
    +
  • + generate dbt docs on dbt-docs branch +
  • +
  • + runs dbt build on prod +
  • +
  • + CI job on PR that: +
  • +
  • + validate branch names +
  • +
  • + run pre-commit hooks +
  • +
+ + +

+ 7. Add airbyte connection on airflow +

+ + +

+ 8. Add new branch “airflow_…” for every env that is not + + production + + +

+ + +

+ 9. New dbt-docs branch +

+ + +

+ 10. Jenkins configuration +

+
    +
  • + Git SA +
  • +
  • + Snowflake SA +
  • +
+ + +

+ 11. Enable dbt-docs once index.html was placed on dbt-docs branch +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/prometheus-queries.html b/docs/docs_output/how-tos/prometheus-queries.html new file mode 100644 index 00000000..4ac4a454 --- /dev/null +++ b/docs/docs_output/how-tos/prometheus-queries.html @@ -0,0 +1,744 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Useful prometheus queries +

+ + +

+ node status with pressure +

+
sum by(node) (kube_node_status_condition{status="true", condition="DiskPressure"}) +
+sum by(node) (kube_node_status_condition{status="true", condition="MemoryPressure"}) +
+sum by(node) (kube_node_status_condition{status="true", condition="PIDPressure"})
+
+ + +

+ pods memory filtering by pod name with regex +

+
sum by(pod) (container_memory_usage_bytes{namespace="<NAMESPACE>", pod=~"<PREFIX>.*"})
+
+ + +

+ containers cpu usage by node +

+
sum by(node) (rate(container_cpu_usage_seconds_total{node="<NODE>"}[5m]))
+
+ + +

+ Node memory +

+
node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100
+
+ + +

+ Loki ingester chunk stored size +

+
loki_ingester_chunk_stored_bytes_total{job="loki"}
+
+ + +

+ Pods killed because they exceeded the memory limit +

+
sum by(pod) (kube_pod_container_status_terminated_reason{reason="OOMKilled", namespace="dcw-prd001"})
+
+ + +

+ Total worker nodes (measured by nodes running airflow worker pods) +

+
count (sum by (node) (kube_pod_info and on (pod) kube_pod_labels{label_airflow_worker!=""}) > 0)
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/q-and-a.html b/docs/docs_output/how-tos/q-and-a.html new file mode 100644 index 00000000..d6550b0e --- /dev/null +++ b/docs/docs_output/how-tos/q-and-a.html @@ -0,0 +1,716 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Questions and Answers +

+

+ These are simple items that don't necessarily fit in elsewhere or need their own articles. +

+ + +

+ How do I start codeserver without validating the git repository credentials? +

+

+ Code servers use User Repository settings, and currently User Repositories only work with SSH keys. Sometimes this is hard to deal with, for example if we can only use https authentication (i.e. from within J&J pulling an external repository), and we need a work-around. +

+

+ The workaround is simple; go to the Django panel. +

+

+ Pick User Repositories +

+

+ Pick the correct User Repository for your user and repo. +

+

+ Put a date and time in the "validated at" field and save it. So long as that isn't blank, it will allow you to start code server. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/recover-disk-on-aks.html b/docs/docs_output/how-tos/recover-disk-on-aks.html new file mode 100644 index 00000000..b331adda --- /dev/null +++ b/docs/docs_output/how-tos/recover-disk-on-aks.html @@ -0,0 +1,942 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Recover disk (PV) from Azure Kubernetes Service. +

+

+ This guide describes how to move a disk from one Kubernetes cluster to another cluster. + + More info + +

+

+ Steps: +

+
    +
  1. + + Edit old pvc to Retain policy. + +
  2. +
  3. + + Get PV name. + +
  4. +
  5. + + Delete PVC to release the PV in the old cluster. + +
  6. +
  7. + + Move the PV resource to new cluster using az cli. + +
  8. +
  9. + + Delete the PVC in the new cluster. + +
  10. +
  11. + + Create the PV and PVC in the new cluster. + +
  12. +
+ + +

+ Edit old pvc to Retain policy +

+

+ The + + persistent volumes (PV) + + created for + + code server + + have the delete policy, which means that when a disk is unbound it is automatically deleted; therefore this policy must be changed to + + Retain + + . +

+
# Get the persistent volumes. E.g:
+kubectl get pv
+
+# Edit the persistent volume. E.g:
+kubectl patch pv pvc-2552cd9b-8231-409d-8b4b-a9d047415b53 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
+
+ + +

+ Get PV name +

+
# Get the persistent volumes. E.g:
+kubectl get pv
+
+ + +

+ Delete PVC to release the PV in the old cluster +

+

+ It is necessary to remove the + + persistent volume claim (PVC) + + to release the + + persistent volume (PV) + + . +

+
# Get the persistent volumes. E.g:
+kubectl -n dcw-dev123 get pvc
+
+# Edit the persistent volume. E.g:
+kubectl -n dcw-dev123 delete pvc code-server-bru-10-config-volume
+
+ + +

+ Move the PV resource to new cluster using az cli +

+
    +
  1. + Get the + + cluster name + + and + + subscription id + + . +
  2. +
+

+ Grant permissions in Grafana +

+
    +
  1. + Get the node resources groups. We will need the origin and destination. +
  2. +
+
# Get the node resources group
+az aks show --resource-group <name-resource-group> --name <cluster-name> --query nodeResourceGroup -o tsv
+
+
    +
  1. + Get the disk id. +
  2. +
+
 # List the disks in the origin node resource group. E.g:
+az disk list --resource-group <node-resource-group>
+
+

+ Grant permissions in Grafana +

+
    +
  1. + Move the disk. +
  2. +
+
az resource invoke-action --action moveResources --ids "/subscriptions/<origin-subcription-id>/resourceGroups/<origin-node-resource-group>" --request-body "{  \"resources\": [\"<disk_id>\"],\"targetResourceGroup\":\"/subscriptions/<destination-subcription-id>/resourceGroups/<destination-node-resource-group>\" }"
+
+ + +

+ Delete the PVC in the new cluster. +

+

+ This step is only necessary if the + + persistent volume claim (PVC) + + already exists. +

+
# Get the persistent volumes. E.g:
+kubectl -n dcw-dev123 get pvc
+
+# Edit the persistent volume. E.g:
+kubectl -n dcw-dev123 delete pvc code-server-bru-10-config-volume
+
+ + +

+ Create the PV and PVC in the new cluster +

+

+ Create the following file + + pvc.yaml + + with the correct names and namespace. +

+
    +
  • + + pv-name + + : E.g: + + pvc-2581bfb0-b56a-4fbd-b302-67cf0ab43499 + +
  • +
  • + + pvc-name + + : If you deleted the pvc, the name should be the same. E.g: + + code-server-bru-10-config-volume + +
  • +
  • + + namespace + + : Kubernetes namespace to be applied. +
  • +
  • + + disk-id-full-path + + : E.g: + + /subscriptions/91bd2205-0d74-42c9-86ad-41cca1b4822b/resourceGroups/MC_datacoves_east-us-a_eastus/providers/Microsoft.Compute/disks/pvc-fddcd2fc-7d35-40e9-b631-49c64bd87cbf + +
  • +
+
apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: <pv-name>
+spec:
+  capacity:
+    storage: 20Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: default
+  csi:
+    driver: disk.csi.azure.com
+    readOnly: false
+    volumeHandle: <disk-id-full-path>
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: <pvc-name>
+  namespace: <namespace>
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 20Gi
+  volumeName: <pv-name>
+  storageClassName: default
+
+

+ Create the resources in Kubernetes +

+
kubectl apply -f pvc.yaml
+
+# Check the resources
+kubectl get pvc | grep <pv-name> # pvc-2552cd9b-8231-409d-8b4b-a9d047415b53
+kubectl -n dcw-dev123 get pvc code-server-bru-10-config-volume
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/register-github-self-hosted-runner.html b/docs/docs_output/how-tos/register-github-self-hosted-runner.html new file mode 100644 index 00000000..55e46252 --- /dev/null +++ b/docs/docs_output/how-tos/register-github-self-hosted-runner.html @@ -0,0 +1,767 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Self hosted Github Runner +

+
    +
  1. + Create a new runner + + in Github + + . You must have + + Owner + + privileges. +
  2. +
  3. + Create a virtual machine, e.g. in Azure, and run the scripts that Github gave you in the previous step. +
  4. +
  5. + Install dependencies on the machine you created +
  6. +
+
# Update and Upgrade
+sudo apt-get update
+sudo apt-get upgrade -y
+
+# Add Kubernetes repository and key
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
+
+# Add Helm repository and key
+curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -
+echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
+
+# Update package list again after adding the Kubernetes and Helm repositories
+sudo apt-get update
+
+# Install software/packages
+sudo apt-get install -y apt-transport-https gnupg2 kubectl tmux python3-pip docker.io golang helm
+
+# Python symbolic link
+sudo ln -s /usr/bin/python3 /usr/bin/python
+
+# Docker post-installation step for the current user
+sudo usermod -aG docker $USER
+
+# Go and kind installation
+go install sigs.k8s.io/kind@v0.20.0
+sudo ln -s /home/datacoves/go/bin/kind /usr/local/bin/kind
+
+
    +
  1. + run + + tmux + + so the session is not closed when you detach from the ssh connection. +
  2. +
  3. + Follow any instruction you got from Github on step 1 and install the runner as a service: + + sudo ./svc.sh install datacoves + +
  4. +
  5. + Boost inotify limits for system performance. Update the following values in the specified files: +
    ```
    +~$ cat /proc/sys/fs/inotify/max_user_instances
    +1024
    +~$ cat /proc/sys/fs/inotify/max_user_watches
    +524288
    +~$ cat /proc/sys/fs/inotify/max_queued_events
    +16384
    +```
    +
    +
  6. +
+
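+ The commands above only read the current values; to raise them persistently you would typically write them through sysctl (a sketch; the values are illustrative and sudo access is assumed): +
+echo "fs.inotify.max_user_instances=1024" | sudo tee -a /etc/sysctl.conf
+echo "fs.inotify.max_user_watches=524288" | sudo tee -a /etc/sysctl.conf
+echo "fs.inotify.max_queued_events=16384" | sudo tee -a /etc/sysctl.conf
+sudo sysctl -p
+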
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/release-notes.html b/docs/docs_output/how-tos/release-notes.html new file mode 100644 index 00000000..d94b6770 --- /dev/null +++ b/docs/docs_output/how-tos/release-notes.html @@ -0,0 +1,763 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Statement of Purpose +

+

+ The purpose of this document is to describe the process by which we manage release notes to deliver to our customers. +

+ + +

+ Source of Authority +

+

+ Release notes all come from Github: +

+

+ https://github.com/datacoves/datacoves/releases +

+

+ The notes begin life as auto-generated notes that are created when the release branch is built. Then, we hand-edit the release notes to match the following format: +

+
Breaking Changes
+* Items that are breaking changes, in list.
+
+New Features
+* New features, in list.
+
+Enhancements
+* Enhancements to old features, in list
+
+Fixes
+* Bug fixes, in list
+
+Under the Hood
+* Notes relevant to us internally which we would like to keep, but not important to customers.
+
+**Full Changelog**: This is a URL that is provided automatically, just leave it in the change log.
+
+ + +

+ Generating Release Notes +

+

+ Release notes are generated per-customer and have all the changes from their current release to the latest release you currently have downloaded in your 'releases' folder. Make sure you have the customer's cluster configuration checked out into your 'config' directory; if you do not, stop and ask for help before continuing. +

+

+ You can control which release notes are generated; make sure you have downloaded the releases first: +

+
./cli.py download_releases
+
+

+ If desired or necessary, you can delete files out of your 'releases' directory; for instance, if the customer is getting updated to the latest 2.2 series release but there are 2.3 series releases available, you could delete all the 2.3 release files out of your 'releases' directory and notes for those releases will not be produced. +

+

+ Release notes are then generated using the + + cli.py + + thusly: +

+
./cli.py combined_release_notes
+
+

+ It will make a file + + combined.md + + in the same directory as + + cli.py + + , and that will have the combined release notes for all the releases involved. This file can then be delivered to the customer as part of the announcement to upgrade them. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/request-access-to-a-cloud-pc-on-kenvue.html b/docs/docs_output/how-tos/request-access-to-a-cloud-pc-on-kenvue.html new file mode 100644 index 00000000..f65cc9fc --- /dev/null +++ b/docs/docs_output/how-tos/request-access-to-a-cloud-pc-on-kenvue.html @@ -0,0 +1,712 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to request access to a cloud PC on Kenvue +

+
    +
  1. +

    + Navigate to this + + form + + . +

    +
  2. +
  3. +

    + Complete it accordingly: +

    +
  4. +
+

+ Kenvue Cloud PC +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/reset-datahub.html b/docs/docs_output/how-tos/reset-datahub.html new file mode 100644 index 00000000..12371408 --- /dev/null +++ b/docs/docs_output/how-tos/reset-datahub.html @@ -0,0 +1,757 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Resetting Datahub +

+

+ Datahub uses PostgreSQL, ElastiCache, and Kafka. If any of these three things gets out of sync for any reason, Datahub will behave very strangely. For instance, it will claim secrets exist but not show them in the UI. +

+

+ In such an event, you will need to reset Datahub. This can be done with the following steps: +

+

+ In all these examples, replace + + xxx + + with the slug (such as dev123). +

+ + +

+ Turn Off Datahub +

+

+ Go to the environment you wish to reset, and disable Datahub. Save and sync the environment, and wait until Datahub comes offline by monitoring the Datahub pods:

+
kubectl get pods -n dcw-xxx | grep datahub
+
+

+ This will take a while.

+ + +

+ Delete Metadata in PostgreSQL +

+
./cli.py pod_sh
+./manage.py dbshell
+\c xxx_dh
+drop table metadata_aspect_v2;
+
+ + +

+ Delete Persistent Volume Claims +

+
kubectl delete pvc -n dcw-xxx elasticsearch-master-elasticsearch-master-0
+kubectl delete pvc -n dcw-xxx data-xxx-kafka-broker-0
+kubectl delete pvc -n dcw-xxx data-xxx-kafka-zookeeper-0
+
+ + +

+ Verify Persistent Volumes are deleted +

+
kubectl get pv -n dcw-xxx | grep xxx | grep elasticsearch
+kubectl get pv -n dcw-xxx | grep xxx | grep kafka
+
+

+ These should show no results. The persistent volumes are deleted automatically when their PVCs are deleted; make sure they are gone.

+ + +

+ Re-enable Datahub +

+

+ Go back to the environment, turn Datahub back on, and re-sync. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/security-vulnerabilities-fix.html b/docs/docs_output/how-tos/security-vulnerabilities-fix.html new file mode 100644 index 00000000..2f67fcfb --- /dev/null +++ b/docs/docs_output/how-tos/security-vulnerabilities-fix.html @@ -0,0 +1,734 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to run security vulnerabilities check and fix them +

+ + +

+ React app +

+ + +

+ Install +

+
yarn add yarn-audit-fix -D
+
+ + +

+ Run +

+
yarn-audit-fix
+
+

+ Learn more: https://yarnpkg.com/package?name=yarn-audit-fix +

+ + +

+ Django app +

+ + +

+ Install +

+
pip install pip-audit
+
+ + +

+ Run +

+
pip-audit -r ./requirements.txt
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/set-maintenance-mode.html b/docs/docs_output/how-tos/set-maintenance-mode.html new file mode 100644 index 00000000..680af83c --- /dev/null +++ b/docs/docs_output/how-tos/set-maintenance-mode.html @@ -0,0 +1,703 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to set the cluster in "Maintenance Mode" +

+

+ Turning it on: +

+
./cli.py set_maintenance_mode <kubectl context> <cluster domain> "on" "today at 9PM UTC" "support@datacoves.com" "our Support Team"
+
+

+ Turning it off: +

+
./cli.py set_maintenance_mode <kubectl context> <cluster domain> "off"
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/setup-oauth-on-azure.html b/docs/docs_output/how-tos/setup-oauth-on-azure.html new file mode 100644 index 00000000..a306618c --- /dev/null +++ b/docs/docs_output/how-tos/setup-oauth-on-azure.html @@ -0,0 +1,800 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to set up OAuth authentication on Azure

+
+

+ + NOTE: + + This guide was based on this + + Auth0 help page + + ; it could require some adjustments.

+
+

+ This is done using Azure AD / Entra ID apps. +

+ + +

+ Register new app +

+
    +
  1. + Navigate to App registrations on Azure Portal +
  2. +
+

+ App registrations +

+
    +
  1. + Register a new App, choosing a name, selecting "Accounts in this organizational directory only (Datacoves Inc. only - Single tenant)" +and providing a redirect url in the form of "https://api.{cluster_domain}/complete/azuread-tenant-oauth2" +
  2. +
+

+ Register new app +

+
    +
  1. + Once created, get the client id and tenant id from the overview page +
  2. +
+

+ Client ID and Tenant Id +

+ + +

+ Generate Client Secret +

+

+ Navigate to 'Certificates & Secrets' and Generate a new client secret +

+

+ Client secret +

+

+ Keep the value safe. +

+ + +

+ Configure permissions +

+

+ Navigate to app permissions and then 'Add permissions'. Select 'Microsoft Graph', then 'Delegated permissions', and the following OpenId permissions. +

+

+ App permissions +

+

+ Also add permissions to read group memberships if they're going to be used to determine permissions in Datacoves.

+

+ App permissions +

+

+ Finally, grant admin consent for the permissions by clicking this button:

+

+ Admin consent +

+ + +

+ Configure token +

+

+ We need to include the groups claim in both the ID and access tokens. To do so, go to Token configuration:

+

+ Token configuration +

+

+ Click on "Add groups claim", select "Security groups", make sure "Group ID" is selected in both ID and Access tokens and click on Add. +

+

+ Add group claim +

+ + +

+ Configure Datacoves +

+

+ Configure the Client ID, Tenant ID and Client Secret accordingly on Datacoves using the env variables AZUREAD_CLIENT_ID, AZUREAD_TENANT_ID, and AZUREAD_CLIENT_SECRET. +
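+ As a sketch, the resulting entries would look like this (values are placeholders; set them wherever the cluster's core api environment variables are defined):
+
+AZUREAD_CLIENT_ID=<client id from the app overview page>
+AZUREAD_TENANT_ID=<tenant id from the app overview page>
+AZUREAD_CLIENT_SECRET=<client secret value>
+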

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/setup-s3-for-dbt-api.html b/docs/docs_output/how-tos/setup-s3-for-dbt-api.html new file mode 100644 index 00000000..64955e16 --- /dev/null +++ b/docs/docs_output/how-tos/setup-s3-for-dbt-api.html @@ -0,0 +1,774 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Create an S3 bucket for dbt api artifacts

+ + +

+ Create bucket on AWS console +

+
    +
  • + Create an S3 bucket. +
  • +
  • + Choose a bucket name; we suggest using <customer>_dbt_api, where <customer> could be ensemble, ensembletest, etc.
  • +
  • + Create an IAM user with a policy to access the bucket, like the one below, + replacing + + {your_bucket_name} + + with your bucket's name. +
  • +
  • + Create an access key for the user. Share it with the Datacoves team. +
  • +
+
{
+  "Version": "2012-10-17",
+  "Statement": [
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:PutObject",
+        "s3:GetObject",
+        "s3:GetObjectVersion",
+        "s3:DeleteObject",
+        "s3:DeleteObjectVersion"
+      ],
+      "Resource": "arn:aws:s3:::{your_bucket_name}/*"
+    },
+    {
+      "Effect": "Allow",
+      "Action": [
+        "s3:ListBucket",
+        "s3:GetBucketLocation"
+      ],
+      "Resource": "arn:aws:s3:::{your_bucket_name}"
+    }
+  ]
+}
+
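+ If you prefer the AWS CLI over the console, a minimal sketch could look like this (the user name, policy name, and policy.json file are placeholders; the policy document is the JSON above):
+
+aws iam create-user --user-name datacoves-dbt-api
+aws iam put-user-policy --user-name datacoves-dbt-api --policy-name dbt-api-s3-access --policy-document file://policy.json
+aws iam create-access-key --user-name datacoves-dbt-api
+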
+ + +

+ Configure Datacoves accordingly +

+

+ For the cluster being configured, set the following environment variables in the + + core-dbt-api.env + + file: +

+
STORAGE_ADAPTER=s3
+S3_BUCKET_NAME=fill_in
+S3_ACCESS_KEY=fill_in
+S3_SECRET_ACCESS_KEY=fill_in
+S3_REGION=fill_in
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/testing-alerts.html b/docs/docs_output/how-tos/testing-alerts.html new file mode 100644 index 00000000..076cc164 --- /dev/null +++ b/docs/docs_output/how-tos/testing-alerts.html @@ -0,0 +1,774 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to create and test alerts +

+ + +

+ Stack +

+
    +
  • + Alert Manager +
  • +
  • + Loki Alert Ruler +
  • +
  • + Grafana +
  • +
+ + +

+ Test Loki Alert +

+
    +
  1. + Add the new alert on + + scripts/data/loki-rules.yaml + + file. +
  2. +
  3. + Install + + Observability Stack + + . +
  4. +
  5. + Force some logs. +
  6. +
+

+ Example: +

+
# Option 1
+kubectl -n core exec -it api-75567b8958-7b7rx -- bash
+
+# Option 2
+./cli.py pod_sh
+
+./manage.py shell_plus
+
+
import requests
+import time
+
+payload = {
+  "streams": [
+    {
+      "stream": {
+        "agent_hostname": "eventhandler",
+        "job": "test",
+        "namespace": "core"
+      },
+      "values": [[ str(int(time.time() * 1e9)), "max node group size reached" ]]
+    }
+  ]
+}
+
+requests.post(
+  url="http://loki-loki-distributed-gateway.prometheus.svc.cluster.local/loki/api/v1/push",
+  json=payload,
+  headers={"Content-Type": "application/json"}
+)
+
+
    +
  1. + Now you can see the alert on + + Cluster Alerts + +
  2. +
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/trigger-cloudx-pipeline-on-kenvue-cluster.html b/docs/docs_output/how-tos/trigger-cloudx-pipeline-on-kenvue-cluster.html new file mode 100644 index 00000000..05af0bda --- /dev/null +++ b/docs/docs_output/how-tos/trigger-cloudx-pipeline-on-kenvue-cluster.html @@ -0,0 +1,711 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to trigger a cloudx pipeline manually after changing cluster.yml on a kenvue cluster +

+
    +
  1. + Go to the bastion +
  2. +
  3. + Run the curl command you can find in 1Password named + + Run cloudx pipelines using curl on Kenvue clusters + +
  4. +
  3. + Note that the + + Branch + + query param references the repo branch you changed. +
  6. +
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/update-kubernetes-and-datacoves.html b/docs/docs_output/how-tos/update-kubernetes-and-datacoves.html new file mode 100644 index 00000000..4accb9b6 --- /dev/null +++ b/docs/docs_output/how-tos/update-kubernetes-and-datacoves.html @@ -0,0 +1,1268 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Statement of Purpose +

+

+ The purpose of this document is to describe common upgrade procedures for both updating Kubernetes and updating Datacoves on customer clusters. +

+ + +

+ Updating Kubernetes +

+

+ The procedure varies for Azure vs. AWS. We generally prefer to use the web console to do the upgrade. +

+ + +

+ Gain Kubernetes command line access to the cluster +

+

+ Make sure you are set up for Kubernetes command line access. +

+
    +
  • + For Orrum the instructions are here: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/orrum +
  • +
+

+ Access whatever VPN is necessary. Switch to the correct Kubernetes context: +

+
kubectl config get-contexts
+kubectl config use-context context-name
+
+

+ If you aren't set up to do this, stop now and get help. +

+ + +

+ Disable Sentry Alarms +

+

+ Sentry is going to complain very loudly about all this. +

+

+ Currently, it looks like there is no way to disable this without the Sentry Business Plan which we do not have. But if that ever changes, we'll update this section. + + For now, there is nothing to do. + +

+ + +

+ Check and Prepare PDBs

+

+ The Kubernetes PDBs can cause an upgrade to hang, as they will prevent pods from shutting down to receive the update. Check the PDBs like this:

+
kubectl get pdb -A
+
+

+ You will get an output similar to: +

+
NAMESPACE       NAME                           MIN AVAILABLE   MAX UNAVAILABLE   ALLOWED DISRUPTIONS   AGE
+calico-system   calico-typha                   N/A             1                 1                     273d
+core            api                            1               N/A               0                     232d
+core            beat                           1               N/A               0                     232d
+core            redis                          1               N/A               0                     232d
+core            workbench                      1               N/A               0                     232d
+core            worker                         1               N/A               0                     232d
+dcw-dev123      dev123-airflow-scheduler-pdb   N/A             1                 1                     26h
+dcw-dev123      dev123-airflow-webserver-pdb   N/A             1                 1                     26h
+kube-system     coredns-pdb                    1               N/A               1                     273d
+kube-system     konnectivity-agent             1               N/A               1                     273d
+kube-system     metrics-server-pdb             1               N/A               1                     273d
+
+

+ Note the core namespace entries with ALLOWED DISRUPTIONS at 0. You will need to patch those so that they allow a disruption, and then revert the patch when done.

+

+ The following commands will allow for a disruption: +

+
kubectl patch pdb -n core api -p '{"spec":{"minAvailable":0}}'
+kubectl patch pdb -n core beat -p '{"spec":{"minAvailable":0}}'
+kubectl patch pdb -n core redis -p '{"spec":{"minAvailable":0}}'
+kubectl patch pdb -n core workbench -p '{"spec":{"minAvailable":0}}'
+kubectl patch pdb -n core worker-long -p '{"spec":{"minAvailable":0}}'
+kubectl patch pdb -n core worker-main -p '{"spec":{"minAvailable":0}}'
+kubectl patch pdb -n core dbt-api -p '{"spec":{"minAvailable":0}}'
+kubectl patch pdb -n prometheus cortex-tenant -p '{"spec":{"minAvailable":0}}'
+
+

+ You can apply this to any other PDBs that prevent disruptions. + + Take note of all the PDBs that you altered in this fashion. + +

+ + +

+ Upgrade Kubernetes +

+

+ This varies based on the cloud provider. +

+ + +

+ On Azure +

+

+ Go to: +

+

+ https://portal.azure.com/#view/HubsExtension/BrowseResource/resourceType/Microsoft.ContainerService%2FmanagedClusters +

+

+ Make sure you are logged into the correct client account (check the upper right corner). +

+

+ Locate the cluster you want to work with. Often you will have to alter the default filters so that "Subscription equals all". +

+

+ Pick the cluster you are updating. If you are not sure which one, ask. +

+

+ On the overview screen that comes up by default, you will see "Kubernetes version" in the upper right area. Click the version number. +

+

+ It will show version details; click Upgrade Version. +

+
    +
  • + Pick Automatic upgrade: Enabled with patch (recommended) +
  • +
  • + Pick Kubernetes version: the version you wish to upgrade to +
  • +
  • + Pick upgrade scope: Upgrade control plane + all node pools +
  • +
  • + Click save +
  • +
+

+ The upgrade will start in a few moments. +

+ + +

+ Wait for it to come back +

+

+ The update can take quite a while. Keep an eye on the pods and watch them update:

+
kubectl get pods -A
+
+

+ You will see a lot of activity, pods shutting down and restarting. Once it's all back online, you can restore the PDBs (see next step) and you can verify the update (see bottom of this file). +

+ + +

+ Restore PDBs

+

+ We need to put the PDBs back in place.

+
kubectl get pdb -A
+
+

+ You will get an output similar to: +

+
NAMESPACE       NAME                           MIN AVAILABLE   MAX UNAVAILABLE   ALLOWED DISRUPTIONS   AGE
+calico-system   calico-typha                   N/A             1                 1                     273d
+core            api                            0               N/A               1                     232d
+core            beat                           0               N/A               1                     232d
+core            redis                          0               N/A               1                     232d
+core            workbench                      0               N/A               1                     232d
+core            worker                         0               N/A               1                     232d
+dcw-dev123      dev123-airflow-scheduler-pdb   N/A             1                 1                     26h
+dcw-dev123      dev123-airflow-webserver-pdb   N/A             1                 1                     26h
+kube-system     coredns-pdb                    1               N/A               1                     273d
+kube-system     konnectivity-agent             1               N/A               1                     273d
+kube-system     metrics-server-pdb             1               N/A               1                     273d
+
+

+ The following commands will re-enable the PDBs: +

+
kubectl patch pdb -n core api -p '{"spec":{"minAvailable":1}}'
+kubectl patch pdb -n core beat -p '{"spec":{"minAvailable":1}}'
+kubectl patch pdb -n core redis -p '{"spec":{"minAvailable":1}}'
+kubectl patch pdb -n core workbench -p '{"spec":{"minAvailable":1}}'
+kubectl patch pdb -n core worker-main -p '{"spec":{"minAvailable":1}}'
+kubectl patch pdb -n core worker-long -p '{"spec":{"minAvailable":1}}'
+kubectl patch pdb -n core dbt-api -p '{"spec":{"minAvailable":1}}'
+kubectl patch pdb -n prometheus cortex-tenant -p '{"spec":{"minAvailable":1}}'
+
+

+ Also restore any additional PDBs you had to disable in the prior step. +

+ + +

+ Updating DataCoves +

+

+ Updating DataCoves is relatively simple. However, some of the access details can be complicated.

+ + +

+ First Time Setup: Set Up Deployment Environment and Get Needed Access +

+

+ J&J, Kenvue, and Orrum have some complexity around access. AKS access is relatively easy. These are one-time steps you need to take to get access to each environment. +

+ + +

+ AKS +

+

+ Accessing AKS is documented here: https://github.com/datacoves/datacoves/blob/main/docs/how-tos/administrate-east-us-a-aks-cluster.md +
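+ If you don't have a kube context for the cluster yet, fetching credentials typically looks like this (a sketch; the actual resource group and cluster name are in the linked access docs):
+
+az aks get-credentials --resource-group <resource-group> --name <cluster-name>
+kubectl config use-context <cluster-name>
+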

+

+ Installation is done using your development system's checked out copy of the Datacoves repository. AKS' configuration repository is located at: https://github.com/datacoves/config-datacoves-east-us-a and should be checked out into your 'config' directory. +

+ + +

+ Orrum +

+

+ Accessing Orrum is documented here: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/orrum +

+

+ Installation is done using your development system's checked out copy of the Datacoves repository. Note that Orrum requires a VPN, but the access is described above. Orrum's configuration repository is here: https://github.com/datacoves/config-datacoves-orrum and must be checked out into your 'config' directory. +

+ + +

+ CCS +

+

+ To access CCS, your Datacoves account must be added to CCS' Azure organization. Eugine Kim can assist with this. +

+

+ Then, you must download and install the Azure VPN client. For Macs, this is done through the Apple Store. +

+

+ And finally, you need the Azure command line tools which you probably already have installed if you followed our README instructions for setting up this repository. You should also be logged into Azure with + + az login + + . +

+

+ Then, on the VPN, you can shell into the Bastion as follows: +

+
az ssh vm --subscription 3099b8af-7ca1-4ff4-b9c5-1960d75beac7 --ip 10.0.2.4
+
+

+ Once on the Bastion, the tools are installed with Linux Brew. Edit your + + .bashrc + + file in your home directory with your favorite editor and add this to the end:

+
eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv)
+
+

+ Log out and log back in. + + python3 --version + + should reveal a modern + + 3.1x + + python version. +

+

+ From this point, simply check out the datacoves repository and do the installation like on any other system.

+ + +

+ J&J / Kenvue +

+

+ J&J access is complex; going into the details of all the setup is out of the scope of this documentation. However, we will cover how to get set up on the bastion so you can get to work. +

+

+ It is a good idea to read this documentation if you haven't already: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/jnj +

+

+ In order to do deployments in J&J or Kenvue, you have to do the work from a bastion server, which is a Linux machine accessible via your Cloud PC. J&J and Kenvue have different bastions, however configuring them is basically the same. +

+

+ The IP address for the J&J Bastion is: + + 10.157.82.138 + + ; the IP address for the Kenvue bastion still needs to be confirmed and documented here (I was unable to log into Kenvue at the time of writing).

+

+ I make a + + .bat + + file that runs + + ssh IP + + where the IP is the one above. +

+

+ Once you log into the bastion, there's a few things to note: +

+
    +
  • + You can sudo to root thusly: + + sudo su - + + . Any other + + sudo + + command will not work; you can only + + sudo su - + + .
  • +
  • + The default home directory you log into on the bastion does not have much disk space, so we use a volume mount on + + /app + + for most of our work. +
  • +
  • + We use + + brew + + to manage packages. +
  • +
+

+ To get set up initially, take the following steps: +

+ + +

+ Copy base configuration +

+

+ + cp -R /app/users/datacoves-home-template/. ~/ + +

+ + +

+ Add brew to your bash rc +

+

+ Edit your + + .bashrc + + file in your home directory with your favorite editor and add this to the end: +

+
eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv)
+
+

+ Log out and log back in. + + python3 --version + + should reveal a modern + + 3.1x + + python version. +

+ + +

+ Login to Kubernetes +

+
kubectl config get-contexts
+
+ + +

+ Set up your deployment repository +

+
sudo su -
+mkdir -p /app/users/$USER
+chown -R $USER /app/users/$USER
+exit
+cd /app/users/$USER
+git clone https://github.com/datacoves/datacoves.git
+cd datacoves
+python3 -m venv .venv
+source .venv/bin/activate
+pip3 install -r requirements.txt
+
+ + +

+ Set up your configuration repository +

+

+ For each environment you will deploy to, you need to check out its config repository into your 'config' directory. The list of repositories is here:

+

+ https://github.com/datacoves/datacoves/blob/main/docs/client-docs/jnj/1-cluster-requirements.md +
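+ For example, the checkout follows the same pattern as the other clusters (the repository name and target directory below are placeholders; use the actual ones from the list above):
+
+cd config
+git clone https://github.com/datacoves/config-<cluster-name>.git <cluster-domain>
+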

+ + +

+ Before Deployment: Create your Plan +

+

+ Before a deployment is done, you must first check to see if there's any special installation steps. I use a Word document template, and I update it according to each release adding any special steps that I need to. Then I print it out and use it as a physical check list. My template file is + + here + + . +

+

+ First, look at the version of the cluster you will be updating. You can get this version from the cluster-params.yaml. The easiest way to do this is to check the difference between two versions in GitHub. Here's an example of a comparison between two versions: +

+

+ https://github.com/datacoves/datacoves/compare/v3.2.202410250048...v3.2.202411140044 +

+

+ Look at all the pull requests in your new release and check whether any are labeled "special release step"; add any special steps to your release document. Post your finished work on the Slack dev channel for commentary.

+ + +

+ Perform the installation +

+

+ Release documentation is here: https://www.notion.so/datacoves/Release-Instructions-1b5ea827f87280f98620dccc1600727c + + Be very sure you are releasing from the correct release branch + + . You need to work from the tag of the version you are releasing. You can check out a tag thusly:

+
git fetch -a
+git checkout refs/tags/v1.2.34234523452524
+
+

+ Replace the tag name with the version you are deploying. If you deploy from main or the wrong branch, you risk using installation scripts that are newer and have features that aren't supported yet by the images you are deploying.

+ + +

+ How to run migrations on a stuck install process +

+

+ Sometimes migrations do not run automatically because the new pod containing the migrations fails before they can be applied. When this occurs we need to execute them manually. To do so, remove the + + LivenessProbe + + and + + ReadinessProbe + + ; this lets the new pod run correctly and allows us to enter it and execute the migrations ourselves.

+
kubectl patch deployments -n core api -p '{"spec": {"template": {"spec": {"containers":[{"name": "api", "livenessProbe": null, "readinessProbe": null}]}}}}'
+
+

+ Once the pod is running correctly:

+
kubectl -n core get pods
+kubectl -n core exec -it api-<hash> -- bash
+./manage.py migrate
+
+ + +

+ Create Profile Image Set for New Release +

+

+ This may be necessary if an error about Profile Image Sets occurs; it is a bit of a chicken-and-egg problem, as the release needs to exist prior to creating the profile image set, but the release won't exist until the install process is attempted.

+

+ Log into the customer's API panel. +

+
    +
  • + Orrum's is: https://api.datacoves.orrum.com/panel +
  • +
  • + CCS' is: https://api.datacoves.cssperfusion.com/panel +
  • +
+

+ Under "Projects" pick "Profile Image Sets". Go to the existing Profile Image Set for the old release, and copy / paste the 4 JSON blocks into an editor. Take a note of what is in the 'profile' field. +

+

+ Go back to the listing of Profile Image Sets and click + + + Add profile image set + + in the corner. Make the profile the same as the previous release's, and choose the new release from the release select box. +

+

+ Then, paste in the four JSON blocks into the new Profile Image Set. Check your release YAML file in + + releases + + and note the section 'code_server_libraries'; compare that to the Python libraries in the profile image set. Update versions as needed, but never downgrade. There's no need to add libraries that are in the release YAML but not in the profile image entry. +

+

+ Also check 'code_server_extensions' against 'code server extensions' and apply the same logic to update extensions that are in the Profile Image Set. +

+

+ Save the new profile image set. Then, keeping a copy of all the data from the old profile image set in case you need it, go back into the old one and delete it.

+

+ You can now re-run installation and it should get past this error. +

+ + +

+ Verify Installation +

+

+ Verifying the installation is the same no matter what process you're engaging in with DataCoves clusters, be it a Kubernetes update or a DataCoves update. +

+
    +
  • + Make sure no helm chart failed and retry if needed: + + ./cli.py retry_helm_charts + +
  • +
  • + Log into the customer's API panel and make sure that is working. +
  • +
  • + Log into the customer's launchpad and make sure that is working. +
  • +
  • + Pick one of the customer's environments and make sure you can get into it. +
      +
    • + Try to use code server ("Transform") +
    • +
    • + Open a terminal in code server and run + + dbt-coves --version + +
    • +
    • + Try to use Airflow ("Orchestrate") +
    • +
    • + Look at logs in one of the DAGs +
    • +
    +
  • +
+

+ If your user does not have permission to get into the customer's cluster, temporarily add yourself to the necessary groups to check the cluster. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/update-ssl-certificates.html b/docs/docs_output/how-tos/update-ssl-certificates.html new file mode 100644 index 00000000..bd69ecef --- /dev/null +++ b/docs/docs_output/how-tos/update-ssl-certificates.html @@ -0,0 +1,1099 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Statement of Purpose +

+

+ The purpose of this document is to describe the process of upgrading SSL certificates for customers that are using custom certificates (i.e. not using Let's Encrypt). +

+ + +

+ Step 1: Prepare and Verify Certificate Files +

+

+ + This should be done soon after certificate files are received, not at the last minute. +

+

+ Ultimately, we need the following files: +

+
    +
  • + root.cer +
  • +
  • + root.secret.key +
  • +
  • + wildcard.cer +
  • +
  • + wildcard.secret.key +
  • +
+

+ The root.cer is the certificate for the root domain, e.g. datacoves.orrum.com

+

+ wildcard.cer is a wildcard, i.e. *.datacoves.orrum.com +

+

+ All of these files should be in pem format; the cer files should contain the complete certificate chain. A pem file looks like this:

+
-----BEGIN CERTIFICATE-----
+MIIEjTCCAvWgAwIBAgIQQ71EG0d4110tqpc8I8ur/jANBgkqhkiG9w0BAQsFADCB
+pzEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMT4wPAYDVQQLDDVzc2Fz
+c2lAU2ViYXN0aWFucy1NYWNCb29rLVByby5sb2NhbCAoU2ViYXN0aWFuIFNhc3Np
+....
+JbszQlyzkyzBxQ5eiK3OUNdsB+n5Zo+TshRRL45wA9fZmvAizzmtehxJWUbidGL7
+eqqMWqdt11MTLJ3feOjGlryMFO6TIt/aH/91VkoLyVhsemuk5LukZ1nIxoWvzHcf
+y2cC+I3F8bWbYkRr92fmb8A=
+-----END CERTIFICATE-----
+
+

+ There should be several BEGIN / END certificate blocks in the wildcard.cer and root.cer files; they should contain a complete certificate chain and should be treated as suspect if they only contain a single certificate block.
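+ A quick sanity check is to count the certificate blocks in each file; anything reporting 1 deserves a closer look:
+
+grep -c "BEGIN CERTIFICATE" wildcard.cer root.cer
+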

+

+ The key files will have a slightly different header, looking like this: +

+
-----BEGIN PRIVATE KEY-----
+MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCLf9Q17CQlOWDB
+CwWOuzL4+aalFwj2PR+OTuPnjHCI8stDedvmy5jtxSkdAL+5PgNu7ZJbKFhbODgT
+...
+OpuSfWnGVhOmii2aiYePtvNqDsLQv59MUxpUi8R6aw/XhG2Vb7t14+hbmUtRScUV
+LcGdNBdJyB8NaHYR/sNF1w==
+-----END PRIVATE KEY-----
+
+

+ + If you receive a pfx format file, we cover that in a section below. Read that section and go through those steps, then return to this section to complete verification. + +

+

+ You can verify the certs with the following commands: +

+
# Verify root
+openssl crl2pkcs7 -nocrl -certfile root.cer | openssl pkcs7 -print_certs -noout -text
+
+# Verify wildcard
+openssl crl2pkcs7 -nocrl -certfile wildcard.cer | openssl pkcs7 -print_certs -noout -text
+
+

+ And you will see several blocks with a Certificate header. One block should contain the host name for the certificate. In our example, datacoves.orrum.com: +

+
Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number:
+            01:cb:00:21:05:34:94:76:2b:f8:68:cf:8a:09:4c:02
+        Signature Algorithm: sha256WithRSAEncryption
+        Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1
+        Validity
+            Not Before: Apr 22 00:00:00 2024 GMT
+            Not After : Apr 21 23:59:59 2025 GMT
+        Subject: CN=datacoves.orrum.com
+
+

+ Note the hostname under 'Subject'; make sure that is the correct host. root will appear as above, as a single host name; wildcard should look like this instead: +

+
Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number:
+            0d:7f:e3:36:2c:db:b0:65:78:9a:c1:88:f8:06:12:4f
+        Signature Algorithm: sha256WithRSAEncryption
+        Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1
+        Validity
+            Not Before: Apr 22 00:00:00 2024 GMT
+            Not After : Apr 21 23:59:59 2025 GMT
+        Subject: CN=*.datacoves.orrum.com
+
+

+ Note the * symbol there in the subject. Also take note of the issuer; + + CN=Thawte TLS RSA CA G1 + + . +

+

+ Elsewhere in the certificate output, you should see a certificate for the issuer, such as: +

+
Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number:
+            09:0e:e8:c5:de:5b:fa:62:d2:ae:2f:f7:09:7c:48:57
+        Signature Algorithm: sha256WithRSAEncryption
+        Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2
+        Validity
+            Not Before: Nov  2 12:24:25 2017 GMT
+            Not After : Nov  2 12:24:25 2027 GMT
+        Subject: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1
+
+

+ Note the subject matches the issuer name. And finally, this certificate has an issuer as well; make sure that one is in the file. In this case, + + DigiCert Global Root G2 + + . In our example, you can find it here: +

+
Certificate:
+    Data:
+        Version: 3 (0x2)
+        Serial Number:
+            03:3a:f1:e6:a7:11:a9:a0:bb:28:64:b1:1d:09:fa:e5
+        Signature Algorithm: sha256WithRSAEncryption
+        Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2
+        Validity
+            Not Before: Aug  1 12:00:00 2013 GMT
+            Not After : Jan 15 12:00:00 2038 GMT
+        Subject: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2
+
+

+ Note again the 'subject' line. Typically PEM files will have certificates in the following order: +

+
    +
  • + Host's certificate +
  • +
  • + One or More Intermediate +
  • +
  • + Root certificate +
  • +
+

+ If you have to assemble a certificate from multiple parts, please be aware that this is the recommended ordering; however I don't think it will cause an error if you get the ordering wrong. +
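+ For instance, assuming the pieces arrived as separate files (the file names here are illustrative), they can be concatenated in that order:
+
+cat host.cer intermediate.cer root_ca.cer > wildcard.cer
+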

+

+ Once your certificates are in order, you can verify the key with the following commands: +

+
openssl rsa -check -noout -in wildcard.secret.key
+openssl rsa -check -noout -in root.secret.key
+
+

+ Both should say: + + RSA key is okay + +

+

+ Now compare the modulus of the key and the cert: +

+
# These two should match
+openssl rsa -modulus -noout -in wildcard.secret.key | openssl md5
+openssl x509 -modulus -noout -in wildcard.cer | openssl md5
+
+# And these two should match
+openssl rsa -modulus -noout -in root.secret.key | openssl md5
+openssl x509 -modulus -noout -in root.cer | openssl md5
+
+

+ If the modulus doesn't match, it may be because the server certificate isn't the first certificate in the .cer file. Make sure the order is correct and try again. +

+ + +

+ Converting pfx format files +

+

+ We have received files in pfx format instead of pem, and these require special handling. Follow these directions to convert them to usable cer and key files, starting with the commands below:

+
# Assuming we have files wildcard.pfx and root.pfx
+#
+# Note: The --legacy option seems to be needed for most people, however
+#       some are able to do this without --legacy ... you can try without
+#       it first if you want.
+#
+# You will be asked for an "Import Password" -- just hit enter to skip that
+# If you get an error after the Import Password, you need --legacy
+
+openssl pkcs12 -in wildcard.pfx -cacerts -out wildcard_ca.cer -nodes -nokeys --legacy
+openssl pkcs12 -in root.pfx -cacerts -out root_ca.cer -nodes -nokeys --legacy
+
+

+ Edit the wildcard_ca.cer and root_ca.cer files, and remove the header above + + -----BEGIN CERTIFICATE----- + + . This header will resemble this:

+
Bag Attributes: <No Attributes>
+subject=C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1
+issuer=C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2
+
+

+ + WARNING: Check the ENTIRE file, as there will probably be several such headers. Any text not between + + -----BEGIN CERTIFICATE----- + + and + + -----END CERTIFICATE----- + + must be removed! +
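+ If you prefer not to clean the files by hand, an awk one-liner like this keeps only the lines between the BEGIN/END markers (a sketch; the output file names are illustrative):
+
+awk '/BEGIN CERTIFICATE/,/END CERTIFICATE/' wildcard_ca.cer > wildcard_ca.clean.cer
+awk '/BEGIN CERTIFICATE/,/END CERTIFICATE/' root_ca.cer > root_ca.clean.cer
+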

+

+ Next, you need to extract the server certs, thusly: +

+
# See notes above regarding --legacy and "Import Password"
+
+openssl pkcs12 -in wildcard.pfx -clcerts -nokeys -out wildcard.single.cer --legacy
+openssl pkcs12 -in root.pfx -clcerts -nokeys -out root.single.cer --legacy
+
+

+ Once again, delete the header(s) above + + -----BEGIN CERTIFICATE----- + + in these files. Afterwards, run the following command: +

+
cat wildcard.single.cer wildcard_ca.cer > wildcard.cer
+cat root.single.cer root_ca.cer > root.cer
+
+

+ Now we're going to generate the private keys. When generating the private keys, set a temporary password (just the word + + password + + is fine); we will remove the password in the subsequent step. +

+
# See notes above regarding --legacy and "Import Password"
+openssl pkcs12 -in wildcard.pfx -nocerts -out wildcard.secrets.withpass.key --legacy
+openssl pkcs12 -in root.pfx -nocerts -out root.secrets.withpass.key --legacy
+
+

+ And finally, strip the passwords out for the final key files: +

+
openssl rsa -in wildcard.secrets.withpass.key -out wildcard.secret.key
+openssl rsa -in root.secrets.withpass.key -out root.secret.key
+
+

+ Now you have the files in PEM format, and you can go back to the section above to verify them. +

+ + +

+ Step 2: Update Cluster +

+

+ This step may vary from customer to customer, so see the appropriate subsection. +

+ + +

+ Orrum +

+

+ First, make sure you have the configuration repository checked out. In your + + config + + directory, clone it thusly: +

+
git clone https://github.com/datacoves/config-datacoves-orrum.git datacoves.orrum.com
+
+

+ In the + + datacoves.orrum.com + + directory, reveal the secrets. If you call this command within a subdirectory, you'll get an error that + + core-api.env.secret + + cannot be found.

+
git secret reveal -f
+
+

+ TODO: add instructions for setting up git secret +

+

+ Then in the + + base + + directory you will find + + root.cer + + , + + root.secret.key + + , + + wildcard.cer + + , and + + wildcard.secret.key + + . Replace these files with the new, verified files from step 1. +

+

+ Connect to the Orrum VPN. Instructions are here: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/orrum +

+

+ Make sure you are in your Orrum context, whatever that is named: +

+
# Use:
+# kubectl config get-contexts
+# To get context list if needed.
+kubectl config use-context orrum_new
+
+

+ Then run setup base. Return to the root directory of your git checkout to run + + cli.py + + thusly: +

+
# Activate your venv first if necessary
+./cli.py setup_base
+
+

+ After the cluster is updated (ingress will be updated), check the certificate: +

+
curl https://api.datacoves.orrum.com -vI
+
+

+ This should output a bunch of information about the certificate, including: +

+
* Server certificate:
+*  subject: CN=*.datacoves.orrum.com
+*  start date: Apr  8 07:33:48 2024 GMT
+*  expire date: Jul  1 07:33:47 2024 GMT
+*  subjectAltName: host "api.datacoves.orrum.com" matched cert's "*.datacoves.orrum.com"
+*  issuer: C=US; O=DigiCert Inc; OU=www.digicert.com; CN=Thawte TLS RSA CA G1
+*  SSL certificate verify ok.
+
+

+ (The CN should be the correct host, and the expire date should be correct). +

+

+ Check the non-wildcard version as well: +

+
curl https://datacoves.orrum.com -vI
+
+

+ Log into Orrum's launchpad and go into one of the environments to make sure pomerium doesn't have any issues; pomerium is particularly sensitive to certificate problems such as not having the full certificate chain in the root.cer / wildcard.cer files. +

+

+ If everything works alright, let's push the secrets. Be careful to not push up the key files as they will show up as "Untracked Files" in a + + git status + + . It is recommended you manually add the files thusly: +

+
# Go back to the config directory
+cd config/datacoves.orrum.com
+
+# See what files changed
+git status
+
+# Add only the changed files, do NOT add the .key files or the original .pfx
+git add .gitsecret/paths/mapping.cfg base/root.cer base/wildcard.cer secrets/core-api.env.secret secrets/docker-config.secret.json.secret secrets/rabbitmq.env.secert
+
+# You can also add any other safe file that you modified, just not those keys!
+
+git commit -m "Update certificates"
+git push
+
+

+ And it should be done! +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/upgrade-dbt-or-related-tools.html b/docs/docs_output/how-tos/upgrade-dbt-or-related-tools.html new file mode 100644 index 00000000..0795b259 --- /dev/null +++ b/docs/docs_output/how-tos/upgrade-dbt-or-related-tools.html @@ -0,0 +1,725 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ How to upgrade dbt or related tools +

+ + +

+ dbt-coves +

+
    +
  • + Pull Request on dbt-coves and merge. This will deploy a new pypi version +
  • +
+ + +

+ All libraries +

+
    +
  • + Get current version of new libraries +
  • +
  • + Upgrade code-server (src/code-server/code-server) docker image requirements.txt and labels +
  • +
  • + Upgrade ci images libraries: ci/airflow and ci/basic, update labels. +
  • +
  • + Upgrade airflow image libraries, install the new libraries in the environment targeted for dag runs, update labels accordingly. +
  • +
  • + Run script that updates labels on docker files +
  • +
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/how-tos/work-on-a-pre-release-locally.html b/docs/docs_output/how-tos/work-on-a-pre-release-locally.html new file mode 100644 index 00000000..ac8c4a79 --- /dev/null +++ b/docs/docs_output/how-tos/work-on-a-pre-release-locally.html @@ -0,0 +1,755 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Make and work on a pre-release locally +

+

+ Sometimes you need to change images and test them locally without affecting production releases. +

+

+ To do so: +

+ + +

+ Build the image you just changed +

+
./cli.py build_and_push <path to service>  # i.e. src/core/api
+
+

+ You'll need to specify the issue # +

+

+ This command will build and push a new image, prefixing its name with the ticket number you provided.

+ + +

+ Generate the pre-release +

+

+ Once the image is pushed, you can create a new pre-release to try that image:

+
./cli.py generate_release
+
+

+ This will create a new release file under /releases, which will also be pushed to GitHub releases so other devs can reuse it.

+ + +

+ Set the pre-release on datacoveslocal.com cluster +

+
./cli.py set_release
+
+

+ Select + + datacoveslocal.com + + . +

+

+ You might need to undo the file changes before pushing to the PR branch.

+ + +

+ Upgrade datacoves in local cluster +

+
./cli.py install
+
+

+ Select + + datacoveslocal.com + +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/implementation/index.html b/docs/docs_output/implementation/index.html new file mode 100644 index 00000000..31d10174 --- /dev/null +++ b/docs/docs_output/implementation/index.html @@ -0,0 +1,688 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+ +
+
+
+
+ + + + + diff --git a/docs/docs_output/implementation/operator.html b/docs/docs_output/implementation/operator.html new file mode 100644 index 00000000..d95fb78b --- /dev/null +++ b/docs/docs_output/implementation/operator.html @@ -0,0 +1,1021 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Operator documentation +

+ + +

+ Overview +

+

+ The datacoves + + operator + + is a kubernetes + + controller + + , written in go, +scaffolded using + + kubebuilder + + . It is responsible for setting up and +managing the kubernetes resources that make up a + + workspace + + (a.k.a. an + + environment + + ). Each workspace has its own k8s namespace. The operator's source +code is in + + src/core/operator/ + + . +

+

+ The operator watches a few custom resources that specify what to set up. They +are defined in + + api/v1/ + + . +

+
    +
  • +

    + + Workspace + + : The main resource, fully describing a workspace. Parts of the +configuration are held in other resources, but the workspace references them all +and is the root of the configuration. Whenever a change to a model in the core +api database impacts a workspace configuration, the core-api's workspace.sync +task recomputes and (re-)writes the corresponding workspace k8s resource. The +operator detects the resource update and runs the reconciliation process to +apply any required changes to the kubernetes resources that compose the workspace. +

    +
  • +
  • +

    + + User + + : Each workspace has a set of users, and each user gets certain resources, +such as a code-server deployment. +

    +
  • +
  • +

    + + HelmRelease + + : Most services set up by the operator are installed using helm. +A HelmRelease specifies that a helm chart should be installed, using a certain +version and helm values. +

    +
  • +
+ + +

+ Background +

+

+ Some useful background knowledge to have and resources to review: +

+ + +

+ Go +

+ + + +

+ Kubernetes +

+ + + +

+ Implementation: Reconcilers +

+ + +

+ Change detection and reconciliation +

+

+ The entry points to our code are the + + Reconcile + + methods for each resource, in + + controllers/*_controller.go + + . The framework + + watches + + kubernetes resources to +determine when to call + + Reconcile + + . The + + SetupWithManager + + method can be used +to influence when + + Reconcile + + should be called. +

+

+ Reconciliation must be idempotent. If an error is returned, or there's a panic, +the framework will retry calling + + Reconcile + + repeatedly, less frequently each +time. +

+

+ To simplify change detection and ensure deployments are restarted when a secret or configmap that affects them changes, we treat secrets and configmaps as immutable values. We include a hash of their contents in their names. This means that, to start using a new version, references to it must be updated. This implies that resources using them will change too, which means all changes can be detected by watching the resource that has the reference, without checking the contents of the secret or configmap.

+ + +

+ Applying changes to derived resources +

+

+ Reconciliation is conceptually stateless. We compute a set of derived resources from the current value of the Workspace resource. We would like a primitive equivalent to + + kubectl apply + + in our go code. Unfortunately that mechanism was not available for reuse when the operator was written, so we had to build our own resource diffing. These are the + + reconcile* + + functions in + + controllers/reconcilers.go + + .

+ + +

+ Concurrency +

+

+ The framework runs + + Reconcile + + concurrently for different resource types. It also +runs the reconciliation for different resources concurrently, at most + + MaxConcurrentReconciles + + at once. Reconciliation of multiple changes to a single resource happens serially. +

+

+ We take advantage of this fact to isolate failures. The Workspace reconciler applies changes to HelmRelease and User resources. This way a failing reconciliation of a HelmRelease or a User won't make the whole Workspace reconciliation fail.

+ + +

+ Implementation: Helm runner +

+

+ Before having the + + helm + + module carry out the installation of helm charts by starting helm subprocesses, we used to call into helm's go code directly from the helmrelease controller. This caused two problems:

+
    +
  • + When the operator was restarted the helm release (stored by helm in a k8s secret) + could be left in a pending-upgrade state, which should only happen if helm is + still running. This is due to helm not cleaning up when interrupted. +
  • +
  • + We ran out of memory, most likely due to a memory leak involving helm state.
  • +
+

+ To address these issues we implemented the + + helm + + module, which schedules helm subprocesses so that we can control their execution. It is a separate module that runs a singleton scheduler process and receives requests to run helm over a channel. The helmrelease_controller simply sends requests to this process without waiting or checking results.

+

+ Currently helm install failures will be logged but won't be retried. Manual intervention is required in this case. In any case, retrying the whole helm install is unlikely to succeed if nothing changed. Certain kinds of intermittent failures could be detected and retried within an operation if desired. But in this case, not retrying the helmrelease reconciliation as a whole is best, I think.

+

+ The meat of the implementation is in the + + run + + function. It keeps track of +running and pending operations (and their potential memory usage) and spawns new +goroutines for each install/upgrade/uninstall operation. It is somewhat subtle +code. You should understand goroutines and channels well before touching it. +

+

+ When the operator is signaled by kubernetes to exit, we must be as gentle as possible with helm subprocesses to avoid leaving the releases in a bad state. There's a grace period between the first signal that the program will exit and forceful termination. We use it to send SIGTERM to all the helm subprocesses, which should allow them to exit more cleanly than if they were SIGKILLed. We haven't seen any more charts left in + + pending-upgrade + + after this change.

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/index.html b/docs/docs_output/index.html new file mode 100644 index 00000000..aa3bfbc4 --- /dev/null +++ b/docs/docs_output/index.html @@ -0,0 +1,778 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ config +

+

+ This directory holds configuration, organized by the cluster's domain name. +

+

+ Most subdirectories are git submodules, to keep their configuration separate.
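+ For reference, pulling those submodules down uses the standard git commands (pass a specific path if you only need one cluster's configuration):
+
+git submodule update --init --recursive
+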

+

+ Every cluster configuration directory can have the following subdirectories: +

+
    +
  • + base: Kustomize directory for the kubernetes cluster global components and configuration. +
  • +
  • + kind: Configuration to create a kind cluster. +
  • +
  • + eks: Configuration to create an eks cluster. +
  • +
  • + cluster-params.yaml, cluster-params.secret.yaml: Cluster configuration. +
  • +
  • + secrets: Secrets, those that are not specific to an environment. +
  • +
  • + environments: Environment configurations, with one subdirectory per environment. +
  • +
+

+ The datacoveslocal.com cluster, for example, looks like this: +

+
config/
+├── datacoveslocal.com/
+│   ├── base/
+│   ├── environments/
+│   ├── kind/
+│   ├── secrets/
+│   ├── cluster-params.secret.yaml
+│   ├── cluster-params.secret.yaml.secret
+│   └── cluster-params.yaml
+...
+
+ + +

+ docs +

+

+ Documentation. +

+
docs
+├── client-docs                            For clients.
+│   ├── jnj
+│   └──  ...
+├── how-tos                                For devops. How to do certain things.
+│   ├── do-thing-x
+│   └──  ...
+├── dev-logs                               Developer logs. Record something you did for future reference. Be careful not to include secrets.
+│   ├── 2021-09-eks-setup.md
+│   └── ...
+├── issues-resolutions                     For support team. How to solve common user issues
+│
+└── ...
+
+ + +

+ scripts +

+

+ Python scripts to manage the project. Usually called by ./cli.py commands. +

+ + +

+ src +

+

+ Datacoves source code and docker image definitions. The core components are in + + src/core + + . +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/issues-resolutions/airflow-corrupted-dag-logs.html b/docs/docs_output/issues-resolutions/airflow-corrupted-dag-logs.html new file mode 100644 index 00000000..517d1d74 --- /dev/null +++ b/docs/docs_output/issues-resolutions/airflow-corrupted-dag-logs.html @@ -0,0 +1,769 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ DAG logs were serialized with a newer version of pickle than the one installed on the Airflow webserver

+ + +

+ Logs +

+
Traceback (most recent call last):
+  File "/home/airflow/.local/bin/airflow", line 8, in <module>
+    sys.exit(main())
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/__main__.py", line 38, in main
+    args.func(args)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 51, in command
+    return func(*args, **kwargs)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/cli.py", line 99, in wrapper
+    return f(*args, **kwargs)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/commands/scheduler_command.py", line 75, in scheduler
+    _run_scheduler_job(args=args)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/commands/scheduler_command.py", line 46, in _run_scheduler_job
+    job.run()
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/base_job.py", line 244, in run
+    self._execute()
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 739, in _execute
+    self._run_scheduler_loop()
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 827, in _run_scheduler_loop
+    num_queued_tis = self._do_scheduling(session)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 909, in _do_scheduling
+    callback_to_run = self._schedule_dag_run(dag_run, session)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1151, in _schedule_dag_run
+    schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
+    return func(*args, **kwargs)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 522, in update_state
+    info = self.task_instance_scheduling_decisions(session)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
+    return func(*args, **kwargs)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 640, in task_instance_scheduling_decisions
+    tis = list(self.get_task_instances(session=session, state=State.task_states))
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper
+    return func(*args, **kwargs)
+  File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 441, in get_task_instances
+    return tis.all()
+  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 2683, in all
+    return self._iter().all()
+  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 1335, in all
+    return self._allrows()
+  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 408, in _allrows
+    rows = self._fetchall_impl()
+  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 1243, in _fetchall_impl
+    return self._real_result._fetchall_impl()
+  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 1636, in _fetchall_impl
+    return list(self.iterator)
+  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/loading.py", line 120, in chunks
+    fetch = cursor._raw_all_rows()
+  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 400, in _raw_all_rows
+    return [make_row(row) for row in rows]
+  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 400, in <listcomp>
+    return [make_row(row) for row in rows]
+  File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/sql/sqltypes.py", line 1816, in process
+    return loads(value)
+  File "/home/airflow/.local/lib/python3.7/site-packages/dill/_dill.py", line 275, in loads
+    return load(file, ignore, **kwds)
+  File "/home/airflow/.local/lib/python3.7/site-packages/dill/_dill.py", line 270, in load
+    return Unpickler(file, ignore=ignore, **kwds).load()
+  File "/home/airflow/.local/lib/python3.7/site-packages/dill/_dill.py", line 472, in load
+    obj = StockUnpickler.load(self)
+ValueError: unsupported pickle protocol: 5
+
+ + +

+ Solution +

+

+ Connect to scheduler or triggerer pod and then remove DAG by running: +

+
airflow dags delete <dag id>
+
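+ For reference, getting a shell on the scheduler pod first typically looks something like this (the namespace and pod name are placeholders):
+
+kubectl -n dcw-<env slug> get pods | grep scheduler
+kubectl -n dcw-<env slug> exec -it <scheduler pod name> -- bash
+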
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/issues-resolutions/dbt-core-debugging.html b/docs/docs_output/issues-resolutions/dbt-core-debugging.html new file mode 100644 index 00000000..219705aa --- /dev/null +++ b/docs/docs_output/issues-resolutions/dbt-core-debugging.html @@ -0,0 +1,740 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Python dbt-core debugging +

+ + +

+ Context: dbt does not respond to any of its commands

+

+ Due to changes in environment variable handling on dbt-core side, a read-only + + $DBT_PROJECT_DIR + + led to dbt not responding to anything but the + + --version + + call. +

+

+ dbt-not-responding +

+

+ All dbt commands returned + + exit code 2 + +

+
2 The dbt invocation completed with an unhandled error (eg. ctrl-c, network interruption, etc).
+
+ + +

+ Solution +

+

+ Using the dbt-core python library and its + + dbtRunner + + lets us surface that + + "unhandled error" +

+
>>> from dbt.cli.main import dbtRunner
+>>> dbt_cli = dbtRunner()
+>>> dbt_cli.invoke(["ls"])
+dbtRunnerResult(success=False, exception=OSError(30, 'Read-only file system'), result=None)
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/issues-resolutions/docker-image-debugging.html b/docs/docs_output/issues-resolutions/docker-image-debugging.html new file mode 100644 index 00000000..0720d4ce --- /dev/null +++ b/docs/docs_output/issues-resolutions/docker-image-debugging.html @@ -0,0 +1,732 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Debugging images outside Datacoves. +

+

+ Sometimes we need to review images that run in Datacoves in a simpler setting, to debug processes, check library versions, pipeline versions, etc.

+
    +
  1. + Create + + compose.yaml + + or + + docker-compose.yaml + + file +
  2. +
+
version: '3'
+
+services:
+  snowflake:
+    image: "taqy-docker.artifactrepo.jnj.com/datacoves/ci-basic-dbt-snowflake:3.1"
+    command: bash -c "sleep infinity"
+
+
    +
  1. + Run commands +
  2. +
+
docker compose run --rm snowflake bash -c "pip show dbt-core dbt-snowflake"
+
+
    +
  1. + Get a terminal +
  2. +
+
docker compose up -d
+docker ps
+docker exec -ti <container-id> /bin/bash
+
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/issues-resolutions/docker-push-stopped-working.html b/docs/docs_output/issues-resolutions/docker-push-stopped-working.html new file mode 100644 index 00000000..379d6bab --- /dev/null +++ b/docs/docs_output/issues-resolutions/docker-push-stopped-working.html @@ -0,0 +1,733 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Reset docker config authentication +

+

+ If that was the case, you might need to log out and log back in after a password reset: +

+
docker logout
+
+

+ Then, remove the entry for taqy-docker.artifactrepo.jnj.com in + + ~/.docker/config.json + + . +

+
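+ If you prefer to script that step, a minimal sketch using jq (assumes jq is available; back up the file first): +
+
cp ~/.docker/config.json ~/.docker/config.json.bak
+# drop the stored auth entry for the registry
+jq 'del(.auths["taqy-docker.artifactrepo.jnj.com"])' ~/.docker/config.json.bak > ~/.docker/config.json
+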

+ Finally, login again: +

+
docker login taqy-docker.artifactrepo.jnj.com
+
+ + +

+ Unlock your artifactory account +

+

+ Sometimes your account can get blocked and you need to unlock it. +

+
    +
  1. + Go to + + appdevtools + +
  2. +
  3. + Under Support, User Access, click on + + Unlock Artifactory Account + + . +
  4. +
+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/issues-resolutions/helm-chart.html b/docs/docs_output/issues-resolutions/helm-chart.html new file mode 100644 index 00000000..aa3e5135 --- /dev/null +++ b/docs/docs_output/issues-resolutions/helm-chart.html @@ -0,0 +1,767 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Helm Chart Resolutions +

+ + +

+ How to patch releases? +

+

+ Sometimes we want to change a value in the + + Helm Chart + + , but normally that requires editing a component such as an + + adapter + + or the + + Operator + + and generating a new release. Patching the release directly is very useful because it lets us skip that whole process and test more quickly. +

+ + +

+ Option No.1 +

+
    +
  1. + Get the values from the release. +
  2. +
+
# helm get values <release> -n <namespace>
+helm get values dev123-datahub -n dcw-dev123 > values.yaml
+
+
    +
  1. + Edit/add the values to the file. +
  2. +
+
vi values.yaml
+
+
    +
  1. + Add the repository if it does not exist. +
  2. +
+
# helm repo add <name> <url>
+helm repo add datahub https://helm.datahubproject.io/
+
+
    +
  1. + Patch the helm chart. +
  2. +
+
# helm upgrade --version <x.x.x> -f values.yaml <release> <repository> -n <namespace>
+helm upgrade --version 0.4.16 -f values.yaml dev123-datahub datahub/datahub -n dcw-dev123
+
+ + +

+ Option No.2 +

+
    +
  1. + Patch the helm chart. +
  2. +
+
# helm upgrade <release> <chart> -n <namespace> --set key1=value1,key2=value2
+helm upgrade dev123-datahub datahub/datahub -n dcw-dev123 --set key1=value1,key2=value2
+
+
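+ Either way, it is worth verifying that the patched release actually rolled out (a quick sketch): +
+
helm history dev123-datahub -n dcw-dev123   # confirm a new revision was created
+kubectl get pods -n dcw-dev123 -w           # watch the pods restart with the new values
+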

+ + More info + +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/issues-resolutions/pomerium-not-allowing-access.html b/docs/docs_output/issues-resolutions/pomerium-not-allowing-access.html new file mode 100644 index 00000000..8d862e4c --- /dev/null +++ b/docs/docs_output/issues-resolutions/pomerium-not-allowing-access.html @@ -0,0 +1,733 @@ + + + + + + + + + + + + + Datacoves Docs + + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + +

+ Pomerium does not allow access to environments +

+ + +

+ Problem +

+

+ Launchpad works OK, but Pomerium returns timeouts; logs like these are found: +

+
{"level":"info","X-Forwarded-For":["10.255.255.2,10.10.0.8"],"X-Forwarded-Host":["authenticate-dev123.orrum.datacoves.com"],"X-Forwarded-Port":["443"],"X-Forwarded-Proto":["http"],"X-Real-Ip":["10.255.255.2"],"ip":"127.0.0.1","user_agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36","request-id":"834a4284-9d39-474a-abb5-cd7203755386","error":"Bad Request: internal/sessions: session is not found","time":"2023-08-17T13:13:39Z","message":"authenticate: session load error"}
+{"level":"info","service":"envoy","upstream-cluster":"pomerium-control-plane-http","method":"GET","authority":"authenticate-dev123.orrum.datacoves.com","path":"/.pomerium","user-agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36","referer":"","forwarded-for":"10.255.255.2,10.10.0.8","request-id":"834a4284-9d39-474a-abb5-cd7203755386","duration":15000.251354,"size":24,"response-code":504,"response-code-details":"upstream_response_timeout","time":"2023-08-17T13:13:55Z","message":"http-request"}
+
+ + +

+ Cause +

+

+ This is a DNS resolution issue that Pomerium is having. Typically this happens when the cluster model has wrong values for + + internal_ip + + or + + external_ip + + . This could have happened when the DB was copied to a different cluster, or when the cluster's IPs changed. +
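+ To confirm, you can compare what the authenticate domain resolves to from inside the cluster with the ingress service's actual external IP (a rough sketch; the hostname is an example): +
+
kubectl get svc -A | grep -i loadbalancer   # actual external IP of the ingress
+kubectl run dns-test --rm -it --restart=Never --image=busybox -- nslookup authenticate-dev123.orrum.datacoves.com
+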

+ + +

+ Solution +

+

+ Remove the values from those two fields and save the cluster model again. On + + save + + , it will regenerate those IPs and Pomerium will be reinstalled. +

+
+
+
+
+
+ + + + + diff --git a/docs/docs_output/robots.txt b/docs/docs_output/robots.txt new file mode 100644 index 00000000..39f8daf0 --- /dev/null +++ b/docs/docs_output/robots.txt @@ -0,0 +1 @@ +Sitemap: https://docs.datacoves.com/sitemap.txt \ No newline at end of file diff --git a/docs/docs_output/sitemap.txt b/docs/docs_output/sitemap.txt new file mode 100644 index 00000000..f2c128e7 --- /dev/null +++ b/docs/docs_output/sitemap.txt @@ -0,0 +1,69 @@ +https://docs.datacoves.com/implementation/index.html +https://docs.datacoves.com/implementation/operator.html +https://docs.datacoves.com/issues-resolutions/docker-image-debugging.html +https://docs.datacoves.com/issues-resolutions/docker-push-stopped-working.html +https://docs.datacoves.com/issues-resolutions/dbt-core-debugging.html +https://docs.datacoves.com/issues-resolutions/helm-chart.html +https://docs.datacoves.com/issues-resolutions/pomerium-not-allowing-access.html +https://docs.datacoves.com/issues-resolutions/airflow-corrupted-dag-logs.html +https://docs.datacoves.com/index.html +https://docs.datacoves.com/how-tos/recover-disk-on-aks.html +https://docs.datacoves.com/how-tos/setup-oauth-on-azure.html +https://docs.datacoves.com/how-tos/install-python-reqs-on-jnj-bastion.html +https://docs.datacoves.com/how-tos/work-on-a-pre-release-locally.html +https://docs.datacoves.com/how-tos/manage-profiles-and-image-sets.html +https://docs.datacoves.com/how-tos/billing-system.html +https://docs.datacoves.com/how-tos/how-to-create-a-ssl-certificate.html +https://docs.datacoves.com/how-tos/debug-airflow-workers.html +https://docs.datacoves.com/how-tos/update-ssl-certificates.html +https://docs.datacoves.com/how-tos/choose-ec2-nodes.html +https://docs.datacoves.com/how-tos/setup-s3-for-dbt-api.html +https://docs.datacoves.com/how-tos/celery-monitoring.html +https://docs.datacoves.com/how-tos/q-and-a.html +https://docs.datacoves.com/how-tos/security-vulnerabilities-fix.html +https://docs.datacoves.com/how-tos/list-code-server-pods-processes.html +https://docs.datacoves.com/how-tos/onboard-a-new-project-on-datacoves.html +https://docs.datacoves.com/how-tos/datacoves-versioning.html +https://docs.datacoves.com/how-tos/move-a-gpg-secret-key.html +https://docs.datacoves.com/how-tos/index.html +https://docs.datacoves.com/how-tos/custom-dns.html +https://docs.datacoves.com/how-tos/grafana-loki-storage-config-providers.html +https://docs.datacoves.com/how-tos/register-github-self-hosted-runner.html +https://docs.datacoves.com/how-tos/airflow-configuration.html +https://docs.datacoves.com/how-tos/update-kubernetes-and-datacoves.html +https://docs.datacoves.com/how-tos/set-maintenance-mode.html +https://docs.datacoves.com/how-tos/make-and-install-a-release.html +https://docs.datacoves.com/how-tos/release-notes.html +https://docs.datacoves.com/how-tos/connect-to-kenvue-cluster-using-a-bastion.html +https://docs.datacoves.com/how-tos/grafana-grant-permisions.html +https://docs.datacoves.com/how-tos/administrate-east-us-a-aks-cluster.html +https://docs.datacoves.com/how-tos/request-access-to-a-cloud-pc-on-kenvue.html +https://docs.datacoves.com/how-tos/hotfix.html +https://docs.datacoves.com/how-tos/reset-datahub.html +https://docs.datacoves.com/how-tos/grafana-loki-storage-config.html +https://docs.datacoves.com/how-tos/codeserver-images.html +https://docs.datacoves.com/how-tos/trigger-cloudx-pipeline-on-kenvue-cluster.html +https://docs.datacoves.com/how-tos/prometheus-queries.html 
+https://docs.datacoves.com/how-tos/debug-dbt-errors-when-return-code-is-not-zero.html +https://docs.datacoves.com/how-tos/upgrade-dbt-or-related-tools.html +https://docs.datacoves.com/how-tos/testing-alerts.html +https://docs.datacoves.com/dev-logs/2021-09-eks-setup.html +https://docs.datacoves.com/dev-logs/index.html +https://docs.datacoves.com/dev-logs/2022-04-jnj-artemisdev-configuration.html +https://docs.datacoves.com/dev-logs/2021-12-jnj-ensembledev-deployment.html +https://docs.datacoves.com/dev-logs/2022-05-setup-aks-postgres-flexible-server.html +https://docs.datacoves.com/dev-logs/2021-06-create-er-diagram.html +https://docs.datacoves.com/dev-logs/2022-04-jnj-ensembletest-deployment.html +https://docs.datacoves.com/client-docs/kenvue/how-to-setup-helm-chart.html +https://docs.datacoves.com/client-docs/index.html +https://docs.datacoves.com/client-docs/orrum/index.html +https://docs.datacoves.com/client-docs/jnj/1-cluster-requirements.html +https://docs.datacoves.com/client-docs/jnj/4-configure-bitbucket-and-jenkins.html +https://docs.datacoves.com/client-docs/jnj/index.html +https://docs.datacoves.com/client-docs/jnj/5-deployment.html +https://docs.datacoves.com/client-docs/jnj/6-access.html +https://docs.datacoves.com/client-docs/jnj/7-configure-sa-docker-in-kubernates.html +https://docs.datacoves.com/client-docs/jnj/3-configure-bastion-ec2-instance.html +https://docs.datacoves.com/client-docs/jnj/2-configuration.html +https://docs.datacoves.com/client-docs/jnj/8-summary-requirements-new-cluster.html +https://docs.datacoves.com/client-docs/ccs/cluster-requirements-azure.html diff --git a/docs/how-tos/DeploymentTemplate.doc b/docs/how-tos/DeploymentTemplate.doc new file mode 100644 index 00000000..4be6e7e5 --- /dev/null +++ b/docs/how-tos/DeploymentTemplate.doc @@ -0,0 +1,142 @@ +{\rtf1\ansi\deff3\adeflang1025 +{\fonttbl{\f0\froman\fprq2\fcharset0 Times New Roman;}{\f1\froman\fprq2\fcharset2 Symbol;}{\f2\fswiss\fprq2\fcharset0 Arial;}{\f3\froman\fprq2\fcharset0 Liberation Serif{\*\falt Times New Roman};}{\f4\fswiss\fprq2\fcharset0 Liberation Sans{\*\falt Arial};}{\f5\froman\fprq2\fcharset0 Times New Roman;}{\f6\froman\fprq2\fcharset0 Liberation Sans{\*\falt Arial};}{\f7\froman\fprq2\fcharset0 Courier New;}{\f8\fnil\fprq2\fcharset0 Noto Sans CJK SC;}{\f9\fnil\fprq2\fcharset0 Courier New;}{\f10\fnil\fprq2\fcharset0 Lohit Devanagari;}{\f11\fnil\fprq0\fcharset128 Lohit Devanagari;}} +{\colortbl;\red0\green0\blue0;\red0\green0\blue255;\red0\green255\blue255;\red0\green255\blue0;\red255\green0\blue255;\red255\green0\blue0;\red255\green255\blue0;\red255\green255\blue255;\red0\green0\blue128;\red0\green128\blue128;\red0\green128\blue0;\red128\green0\blue128;\red128\green0\blue0;\red128\green128\blue0;\red128\green128\blue128;\red192\green192\blue192;} +{\stylesheet{\s0\snext0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Normal;} +{\s1\sbasedon20\snext1\rtlch\af10\afs28 \ltrch\hich\af4\loch\widctlpar\sb440\sa60\keepn\f4\fs34\b\dbch\af8 Heading 1;} +{\s2\sbasedon20\snext2\rtlch\af10\afs28 \ltrch\hich\af4\loch\widctlpar\sb440\sa60\keepn\f4\fs28\b\dbch\af8 Heading 2;} +{\s3\sbasedon20\snext3\rtlch\af10\afs28 \ltrch\hich\af4\loch\widctlpar\sb440\sa60\keepn\f4\fs24\b\dbch\af8 Heading 3;} +{\s4\sbasedon20\snext4\rtlch\af10\afs28 \ltrch\hich\af4\loch\widctlpar\sb440\sa60\keepn\f4\fs24\b\dbch\af8 Heading 4;} +{\*\cs15\snext15\loch\super\fs20 Endnote Characters;} +{\*\cs16\snext16\loch\super\fs20 
Endnote Anchor;} +{\*\cs17\snext17\loch\super\fs20 Footnote Characters;} +{\*\cs18\snext18\loch\super\fs20 Footnote Anchor;} +{\*\cs19\snext19\loch\cf9\ul\ulc0 Hyperlink;} +{\s20\sbasedon0\snext21\rtlch\af10\afs28 \ltrch\hich\af4\loch\widctlpar\sb240\sa120\keepn\f4\fs28\dbch\af8 Heading;} +{\s21\sbasedon0\snext21\hich\af5\loch\sl276\slmult1\widctlpar\sb0\sa140\f5\fs24\dbch\af9 Text Body;} +{\s22\sbasedon21\snext22\rtlch\af11 \ltrch\hich\af5\loch\sl276\slmult1\widctlpar\sb0\sa140\f5\fs24\dbch\af9 List;} +{\s23\sbasedon0\snext23\rtlch\af11\afs24\ai \ltrch\hich\af5\loch\widctlpar\sb120\sa120\noline\f5\fs24\i\dbch\af9 Caption;} +{\s24\sbasedon0\snext24\rtlch\af11 \ltrch\hich\af5\loch\widctlpar\noline\f5\fs24\dbch\af9 Index;} +{\s25\snext25\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Arrowhead List;} +{\s26\sbasedon0\snext26\hich\af5\loch\widctlpar\li1440\ri1440\lin1440\rin1440\fi0\sb0\sa120\f5\fs24\dbch\af9 Block Text;} +{\s27\snext27\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Box List;} +{\s28\snext28\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Bullet List;} +{\s29\sbasedon44\snext29\loch\tx1584 Chapter Heading;} +{\s30\sbasedon24\snext30\rtlch\af11 \ltrch\hich\af5\loch\widctlpar\li720\ri0\lin720\rin0\fi-431\noline\f5\fs24\dbch\af9 Contents 1;} +{\s31\sbasedon24\snext31\rtlch\af11 \ltrch\hich\af5\loch\widctlpar\li1440\ri0\lin1440\rin0\fi-431\noline\f5\fs24\dbch\af9 Contents 2;} +{\s32\sbasedon24\snext32\rtlch\af11 \ltrch\hich\af5\loch\widctlpar\li2160\ri0\lin2160\rin0\fi-431\noline\f5\fs24\dbch\af9 Contents 3;} +{\s33\sbasedon24\snext33\rtlch\af11 \ltrch\hich\af5\loch\widctlpar\li2880\ri0\lin2880\rin0\fi-431\noline\f5\fs24\dbch\af9 Contents 4;} +{\s34\snext34\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af6\loch\qc\sb240\sa120\nowidctlpar\hyphpar0\ltrpar\f6\fs32\b\cf0\lang1033\kerning1\dbch\af12\langfe2052 Contents Header;} +{\s35\snext35\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Dashed List;} +{\s36\snext36\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Diamond List;} +{\s37\sbasedon0\snext37\hich\af5\loch\widctlpar\f5\fs24\dbch\af9 Endnote;} +{\s38\sbasedon0\snext38\hich\af5\loch\widctlpar\f5\fs20\dbch\af9 Footnote;} +{\s39\snext39\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Hand List;} +{\s40\snext40\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Heart List;} +{\s41\snext41\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Implies List;} +{\s42\sbasedon47\snext42\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\f5\fs24\dbch\af9 Lower Case List;} 
+{\s43\snext43\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af3\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\cf0\f3\fs24\lang1033\kerning1\dbch\af12\langfe2052 Lower Roman List;} +{\s44\snext44\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af3\loch\tx431\nowidctlpar\hyphpar0\ltrpar\cf0\f3\fs24\lang1033\kerning1\dbch\af12\langfe2052 Numbered Heading 1;} +{\s45\snext45\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af3\loch\tx431\nowidctlpar\hyphpar0\ltrpar\cf0\f3\fs24\lang1033\kerning1\dbch\af12\langfe2052 Numbered Heading 2;} +{\s46\snext46\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af3\loch\tx431\nowidctlpar\hyphpar0\ltrpar\cf0\f3\fs24\lang1033\kerning1\dbch\af12\langfe2052 Numbered Heading 3;} +{\s47\snext47\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Numbered List;} +{\s48\sbasedon0\snext48\hich\af7\loch\widctlpar\f7\fs24\dbch\af9 Plain Text;} +{\s49\sbasedon44\snext0\loch\tx1584 Section Heading;} +{\s50\snext50\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Square List;} +{\s51\snext51\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Star List;} +{\s52\snext52\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Tick List;} +{\s53\snext53\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\nowidctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052 Triangle List;} +{\s54\sbasedon47\snext54\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\f5\fs24\dbch\af9 Upper Case List;} +{\s55\sbasedon47\snext55\hich\af5\loch\li720\ri0\lin720\rin0\fi-431\f5\fs24\dbch\af9 Upper Roman List;} +}{\*\generator LibreOffice/7.3.7.2$Linux_X86_64 LibreOffice_project/30$Build-2}{\info{\author sconley}{\creatim\yr0\mo0\dy0\hr0\min0}{\revtim\yr2024\mo11\dy14\hr15\min37}{\printim\yr0\mo0\dy0\hr0\min0}}{\*\userprops}\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720\deftab720 +\hyphauto1\viewscale100 +{\*\pgdsctbl +{\pgdsc0\pgdscuse451\pgwsxn12240\pghsxn15840\marglsxn1440\margrsxn1440\margtsxn1440\margbsxn1440\pgdscnxt0 Default Page Style;} +{\pgdsc1\pgdscuse451\pgndec\pgwsxn12240\pghsxn15840\marglsxn1134\margrsxn1134\margtsxn1134\margbsxn1134\pgdscnxt1 Endnote;}} +\formshade\paperh15840\paperw12240\margl1440\margr1440\margt1440\margb1440\sectd\sbknone\pgndec\sftnnar\saftnnar\sectunlocked1\pgwsxn12240\pghsxn15840\marglsxn1440\margrsxn1440\margtsxn1440\margbsxn1440\ftnbj\ftnstart1\ftnrstcont\ftnnar\aenddoc\aftnrstcont\aftnstart1\aftnnar +{\*\ftnsep\chftnsep}\pgndec\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\qc\ltrpar{\loch\fs20\lang1033\b\loch +Deployment Plan - 2024-10-14 Release} +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar\loch\fs20\lang1033\loch + 
+\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar\loch\fs20\lang1033\loch + +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar{\loch\fs20\lang1033\ul\ulc0\b\loch +Preliminary} +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar{\loch\fs20\lang1033\loch +These steps will be done prior to the scheduled release, done at least an hour in advance.\u8198\'20 \u8198\'20 These steps do not include doing the "generate_release"; these steps assume you have a release already.\u8198\'20 \u8198\'20 You should also do a cli.py special_release_notes\u8198\'20 \u8198\'20 run and update this documentation as necessary.} +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar\loch\fs24\loch + +\par \trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9269\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +1. Confirm current version in deployment cluster}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9269\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +2. 
Using the version from above, generate release notes}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9269\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +2. a. Clean up release notes, re-generating them until they look nice}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9269\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +2. b. Deliver release notes to Noel.\u8198\'20 \u8198\'20 Call out any breaking changes.}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9269\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +3. 
Confirm feature enablement -- check in Panel, compare to cluster-params.yaml}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9269\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +4. Change the cluster-params.yaml version (cli.py set_release)}{\loch\fs20\lang1033\loch +. }{\rtlch\ab \ltrch\lang1033\loch\fs20\lang1033\b\loch +Do not clean up old releases when using set_release -- that can cause a broken deployment.}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9269\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +5. Commit changes to configuration repository.}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9269\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +6. 
}{\loch\fs20\lang1033\b\loch +J&J/KV Only:}{\loch\fs20\lang1033\loch + Log into bastion, check out latest configuration, reveal secrets, and do cli.py install }{\loch\fs20\lang1033\b\loch +ONLY DOWNLOAD IMAGES}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9269\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +7. Plan Profile Image Set Updates for the environment}\cell\row\pard\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\nowidctlpar\ltrpar\loch\fs24\loch + +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar\loch\fs20\lang1033\loch + +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar{\loch\fs20\lang1033\ul\ulc0\b\loch +Installation} +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar{\loch\fs20\lang1033\loch +These are steps that are done at install time.} +\par \trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +1. 
Check out latest configuration and set branch to the tag that is going to be deployed.}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +2. git secret reveal -f}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +3. cli.py install}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +4. 
Run create permissions}\cell\row\pard\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\nowidctlpar\ltrpar\loch\fs24\loch + +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\sl240\slmult1\ql\ltrpar\loch\fs20\lang1033\loch + +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar{\loch\fs20\lang1033\ul\ulc0\b\loch +Verification} +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar{\loch\fs20\lang1033\loch +These are steps done to verify the installation.} +\par \trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +1. Check kubectl get pods -A and make sure all pods are in an expected state.}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +2. 
Log into panel.}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +3. Log into workbench}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +4. Go to a couple of environments and make sure everything that is expected to start, starts, and seems to work.}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +5. 
Notify team installation is complete - Noel or Mayra to make sure dbt debug works and check airflow for errors.}\cell\row\pard\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\nowidctlpar\ltrpar\loch\fs24\loch + +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\sl240\slmult1\ql\ltrpar\loch\fs20\lang1033\loch + +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar{\loch\fs20\lang1033\ul\ulc0\b\loch +Post Installation} +\par \pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\ltrpar{\loch\fs20\lang1033\loch +These steps are done after the installation is verified and are any 'cleanup' steps that need to be done.} +\par \trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\sl240\slmult1\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +Update Notion's Release version: }{{\field{\*\fldinst HYPERLINK "https://www.notion.so/datacoves/017ecb2e9e154a30a2434f7853866f61?v=cc78475d9fbf4e7abef67356560b0009" }{\fldrslt {\loch\fs20\lang1033\loch +{\*\bkmkstart https://www.notion.so/datacove}https://www.notion.so/datacoves/017ecb2e9e154a30a2434f7853866f61?v=cc78475d9fbf4e7abef67356560b0009}}}{\*\bkmkend https://www.notion.so/datacove}}\cell\row\pard\trowd\trql\trleft-36\ltrrow\trpaddft3\trpaddt0\trpaddfl3\trpaddl0\trpaddfb3\trpaddb0\trpaddfr3\trpaddr0\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx449\clbrdrt\brdrs\brdrw10\brdrcf1\clbrdrl\brdrs\brdrw10\brdrcf1\clpadft3\clpadt36\clbrdrb\brdrs\brdrw10\brdrcf1\clbrdrr\brdrs\brdrw10\brdrcf1\clpadfr3\clpadr36\clvertalt\cellx9179\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar\loch +\cell\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\intbl\ql\nowidctlpar\ltrpar{\loch\fs20\lang1033\loch +Assist with updating PIS or Environments as needed.}\cell\row\pard\pard\plain \s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\ql\nowidctlpar\ltrpar\loch\fs24\loch + +\par \pard\plain 
\s0\rtlch\af10\afs24\alang1081 \ltrch\lang1033\langfe2052\hich\af5\loch\widctlpar\hyphpar0\ltrpar\f5\fs24\cf0\lang1033\kerning1\dbch\af9\langfe2052\sl240\slmult1\ql\ltrpar\loch + +\par } \ No newline at end of file diff --git a/docs/how-tos/administrate-east-us-a-aks-cluster.md b/docs/how-tos/administrate-east-us-a-aks-cluster.md new file mode 100644 index 00000000..536ea8a6 --- /dev/null +++ b/docs/how-tos/administrate-east-us-a-aks-cluster.md @@ -0,0 +1,53 @@ +# Administrate east-us-a AKS cluster + +## Permissions + +1. Ask an administrator to create you a datacoves (microsoft) user. https://admin.microsoft.com. +2. Ask an administrator to add you to the `DevOps` [group](https://portal.azure.com/#view/Microsoft_AAD_IAM/GroupDetailsMenuBlade/~/Members/groupId/3debb9f2-c29e-4485-81c7-d4644d359d1b). + +## Configure kubectl + +[Download Azure CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest). + +Login to your account: + +```bash +az login +``` + +Then, run the following commands: + +```bash +az account set --subscription 91bd2205-0d74-42c9-86ad-41cca1b4822b +az aks get-credentials --resource-group datacoves --name east-us-a +``` + +This will add a new context to `kubectl`, so you can now run: + +```bash +kubectl get pods -A +``` + +## Manage nodepools + +### List nodepools + +List nodepools in the `datacoves` resource group, `east-us-a` cluster: + +```sh +az aks nodepool list --cluster-name east-us-a --resource-group datacoves +``` + +### Add workers nodepool + +```sh + az aks nodepool add --cluster-name east-us-a --resource-group datacoves --name workerslarge --mode User --enable-cluster-autoscaler --min-count 1 --max-count 10 --node-vm-size Standard_D4s_v3 --labels k8s.datacoves.com/workers=enabled +``` + +## Modify existing nodepool to add new labels + +Let's add a new label `k8s.datacoves.com/workers=enabled` to an existing nodepool which already has the label `k8s.datacoves.com/nodegroup-kind=general`. Old a new labels need to be specified. + +```sh +az aks nodepool update --cluster-name east-us-a --resource-group datacoves --name generallarge --labels {k8s.datacoves.com/workers=enabled,k8s.datacoves.com/nodegroup-kind=general} +``` \ No newline at end of file diff --git a/docs/how-tos/airflow-configuration.md b/docs/how-tos/airflow-configuration.md new file mode 100644 index 00000000..68c6c247 --- /dev/null +++ b/docs/how-tos/airflow-configuration.md @@ -0,0 +1,23 @@ +## Environment variables override +Airflow has a feature that lets you override system's defaults on a per-task basis (see https://airflow.apache.org/docs/apache-airflow/stable/core-concepts/executor/kubernetes.html#pod-override). + +__Example "Log level override"__: + +```python +"pod_override": k8s.V1Pod( + spec=k8s.V1PodSpec( + containers=[ + k8s.V1Container( + name="base", + image=f"{IMAGE_REPO}:{IMAGE_TAG}", + env=[ + k8s.V1EnvVar( + name="AIRFLOW__LOGGING__LOGGING_LEVEL", + value="DEBUG" + ) + ] + ) + ] + ) +), +``` diff --git a/docs/how-tos/billing-system.md b/docs/how-tos/billing-system.md new file mode 100644 index 00000000..8dfb835e --- /dev/null +++ b/docs/how-tos/billing-system.md @@ -0,0 +1,109 @@ +# Datacoves Billing System + +This document provides comprehensive information on Datacoves’ billing integration with Stripe. + +# Introduction + +Datacoves integrates with Stripe to manage billing by listening to Stripe events and adjusting Datacoves data accordingly. The system also modifies subscriptions when changes occur in services, users, or tally marks. 
+ +The connection between Datacoves and Stripe begins when a user creates a subscription through the Setup Wizard, or when a Datacoves Admin sets up a subscription directly in the Stripe UI. + +**Note**: Free trial accounts **are not connected** to Stripe. + +# Account Setup Wizard + +![Account Setup Wizard](img/setup-wizard.png) + +## Customer Types + +For billing, we distinguish between three customer types: + + +1. Free trial customers +2. Credit card customers +3. Check / bank transfer customers + +## Free trial customers + +These type of customers are not connected to stripe while they're on trial. During the trial period, Stripe does not have information about these accounts. + +Free trial customers will see a button on the header inviting them to finalize the trial and create a subscription. Upon subscribing, they transition to `credit card customers`. + +## Credit card customers + +Credit card customers workflow is completely managed by Datacoves: + +1. Customer selects `Growth Plan` and after clicking on `Next` Datacoves creates the stripe customer, sets the `customer_id` and redirects them to Stripe billing page where the stripe billing process begins. + +2. Once the customer enters their credit card and completes the Stripe billing process, Datacoves receives a notification and sets the `subscription` payload on the brand new account. + +3. From this point, any updates to services/users/tally marks in the Datacoves account are automatically reflected in Stripe, adjusting invoices accordingly. + +## Check / bank customers + +For customers preferring bank transfers or checks, setup is managed manually through the Stripe UI. + +### Customer Setup + +1. Customer creates a Trial account as described earlier. +2. An Stripe Admin [manually creates the customer](https://dashboard.stripe.com/customers) using the Stripe UI. In order to follow the same convention used by Datacoves, please use account's `slug` as the stripe customer name, and account's `owner email` as the stripe customer email. Add an address to calculate taxes automatically. +3. Once you got a customer id on stripe, modify the Datacoves account on the admin panel and set it on the `Customer Id` field. +4. Modify the Datacoves account once more and set the right `plan` and `variant`. We typically use `growth` plan for these accounts, the `variant` will be determined by Sales depending on the pricing negotiated. + +### Subscription Setup + +The Stripe Admin now [creates a subscription on Stripe](https://dashboard.stripe.com/subscriptions?status=active&create=subscription) for the recently created customer. Please be cautious with the products included in the subscription, they should match exactly the products included in the plan. You can inspect them [here](https://api.east-us-a.datacoves.com/panel/billing/plan/). + +You don't need to add the metered products on a new subscription, Datacoves will modify the subscription and add them later. Unless the customer prepaid for developer seats and services, you include the developers seat product specifying the total user licenes and then one product line per service (airbyte, airflow, superset, datahub). +In the following example, there are 5 user licences, 1 Airbyte, 1 Airflow and 1 Superset server: + +![Create Subscription](img/create-subscription.png) + +NOTE: Certain customers (like Guitar Center) could opt to prepay the developer seats and services costs via Bank transfer / check. In those cases, you only include the metered products in the subscription. 
+ +Don't forget to set the right plan on the subscription metadata, it's usually `growth-monthly`, if you need a different one, type the `slug` field of the chosen one. + +![Subscription Plan](img/subscription-metadata.png) + +On Payment, select `Email invoice to the customer to pay manually` and uncheck `Include a stripe-hosted link to an invoice payment page in the invoice`. + +Finalize by clicking on `Create subscription`. + +Go to the Django admin panel and check that the account has a `JSON` dict on the field `subscription`. If it does, it means the connection is set, you can now finalize the trial by setting a past end date in the `Trial ends at` field (or by just removing trial start and end dates). + +# Add credit to customer + +Once the subscription was created, the customer will start generating a debt. +As soon as Datacoves receives a check or wire, a Stripe Admin needs to register it on the Django Admin, as follows: +Note that credits have a validity period, during that period the developer licences or services specified will be discounted from the invoice. + +1. Go to Accounts, select the customer's account and edit it. +2. Scroll down until you see the `Credits` area. +3. Click on `Add another credit` and complete the required fields including as much information as possible in the reference field. +4. Click on `Save`. + +![Add credit to Account](img/add-credit-to-account.png) + +## F.A.Q. + +### How do I configure my local environment to test Stripe? + +First of all, you need to set to `True` the feature `accounts_signup` on the only record you have in the `Cluster` model. + +Then, if you're using `datacoveslocal.com` and you were granted permissions automatically to the `local` account, you need +to remove all the permissions to such account, doing that the Datacoves UI will allow you creating a new account using the +setup wizard. + +You should also set `setup enabled` on `True` on the admin panel for you user. + +Then, navigate to https://datacoveslocal.com/account-setup/ and follow the instructions to create an account using Stripe. + +### How do I run the stripe webhooks locally to test billing integration? + +Run `./cli.py stripe_webhooks` and follow the instructions. + +### How to sync stripe live products with test products? + +Sometimes you modified the live products (prices/descriptions) and you need to update the test ones. + +Just run `./cli.py copy_to_stripe_test` to run the live -> test sync process. diff --git a/docs/how-tos/celery-monitoring.md b/docs/how-tos/celery-monitoring.md new file mode 100644 index 00000000..c7860af9 --- /dev/null +++ b/docs/how-tos/celery-monitoring.md @@ -0,0 +1,48 @@ +# Celery monitoring + +For authoritative, more detailed information, see [celery's monitoring guide](https://docs.celeryq.dev/en/stable/userguide/monitoring.html). + + +## UI + +We run the flower UI at `https://flower.{cluster_domain}`. You can see executed +tasks by clicking on tasks, or navigating to `https://flower.{cluster_domain}/tasks`. +You'll want to sort tasks to see the latest Started or Received at the top. +You can filter by task using the Search input. The UI doesn't refresh live. +Increasing the number of shown entries can be helpful. + + +## CLI + +From a core-api pod (`kcc exec -it $api_pod_name -- bash`) you can invoke +celery inspect. One useful thing to do is check the stats. + +``` +celery -A datacoves inspect stats +``` + +Here's an excerpt from the output. + +``` +... 
+ "total": { + "billing.tasks.inform_billing_events": 113, + "billing.tasks.tally_account_resource_usage": 1, + "billing.tasks.tally_resource_usage": 1, + "celery.backend_cleanup": 1, + "clusters.workspace.sync_task": 1211, + "iam.tasks.clear_tokens": 1, + "iam.tasks.remove_missing_user_groups": 1, + "notifications.tasks.send_slack_notification": 7, + "projects.tasks.delete_unused_project_keys": 1, + "projects.tasks.remove_unused_environments": 1, + "projects.tasks.remove_unused_user_volumes": 1, + "projects.tasks.stop_sharing_codeservers": 38, + "projects.tasks.turn_off_unused_workspaces": 1134 + }, + "uptime": 68132 +... +``` + +The uptime is 68132 seconds, and the sync_task has run 1211 times, so there's +been one run every 56 seconds in average. diff --git a/docs/how-tos/choose-ec2-nodes.md b/docs/how-tos/choose-ec2-nodes.md new file mode 100644 index 00000000..1ce82299 --- /dev/null +++ b/docs/how-tos/choose-ec2-nodes.md @@ -0,0 +1,42 @@ +# Choosing an ec2 instance type and estimating pods per node + +[AWS docs.][choosing] + +## Pod limit from network constraints + +Every pod must have an IP. EC2 instances have a maximum number of IPs, which +limits the number of pods per node. [source][eni-max-pods] + +With CNI version 1.9 or higher and nitro instances, [the pod limit can be increased][eni-max-pods-update]. +For example: + +``` + $ ./max-pods-calculator.sh --instance-type m5.large --cni-version 1.9.0 +29 + + $ ./max-pods-calculator.sh --instance-type m5.large --cni-version 1.9.0 --cni-prefix-delegation-enabled +110 + +# For ensembledev.apps.jnj.com: +$ ./max-pods-calculator.sh --instance-type m5.4xlarge --cni-version 1.7.1 +110 +``` + +[List of ENI and IP limits per instance type][instance-max-eni]. + + +## Pod limit from volume attachment limits + +Currently some of our pods (code-server) require ELB volumes. EC2 instances have +a maximum number of volumes that can be attached. For "most" nitro instances, the +sum of ENIs, volume attachments and instance store volumes must be less than 28. +[source][vol-max-pods]. Volume attachments seem capped by 26 because the mount +points use the a letter of the alphabet each. + + + +[choosing]: https://docs.aws.amazon.com/eks/latest/userguide/choosing-instance-type.html +[eni-max-pods]: https://raw.githubusercontent.com/awslabs/amazon-eks-ami/master/files/eni-max-pods.txt +[eni-max-pods-update]: https://aws.amazon.com/blogs/containers/amazon-vpc-cni-increases-pods-per-node-limits/ +[instance-max-eni]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html#AvailableIpPerENI +[vol-max-pods]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html diff --git a/docs/how-tos/codeserver-images.md b/docs/how-tos/codeserver-images.md new file mode 100644 index 00000000..e29a2c97 --- /dev/null +++ b/docs/how-tos/codeserver-images.md @@ -0,0 +1,43 @@ +Check the versions, but these are the standard Datacoves VS Code extensions: + +SQLFluff is a SQL linter with dbt support +https://datacoves-vs-code-images.s3.amazonaws.com/dorzey.vscode-sqlfluff-3.2.0.vsix + +This extensions adds grid (excel like) editing for CSV files +https://datacoves-vs-code-images.s3.amazonaws.com/janisdd.vscode-edit-csv-0.10.0.vsix + +Standard VS Code Python extension +https://datacoves-vs-code-images.s3.amazonaws.com/ms-python.python-2024.14.1.vsix + +This adds yml validations +https://datacoves-vs-code-images.s3.amazonaws.com/redhat.vscode-yaml-1.15.0.vsix + +This adds "short cuts" to VS Code like the "run current model" and "more.." 
+https://datacoves-vs-code-images.s3.amazonaws.com/RobertOstermann.better-status-bar-1.0.9.vsix
+
+This adds Jinja support, I think it is dbt-jinja
+https://datacoves-vs-code-images.s3.amazonaws.com/samuelcolvin.jinjahtml-0.20.0.vsix
+
+This adds items to the file context menu like "Duplicate"
+https://datacoves-vs-code-images.s3.amazonaws.com/sleistner.vscode-fileutils-3.10.3.vsix
+
+This adds spell checking
+https://datacoves-vs-code-images.s3.amazonaws.com/streetsidesoftware.code-spell-checker-3.0.1.vsix
+
+This is our Power User Extension that adds things like query preview and near real time linting
+https://datacoves-vs-code-images.s3.amazonaws.com/vscode-datacoves-power-user-0.9.16.vsix
+
+Python Ruff linter, main use case is to show vars and imports not being used in a .py file
+https://datacoves-vs-code-images.s3.amazonaws.com/charliermarsh.ruff-2024.56.0.vsix
+
+This adds colors to each column of a CSV file
+https://datacoves-vs-code-images.s3.amazonaws.com/mechatroner.rainbow-csv-3.3.0.vsix
+
+This is part of the Datacoves install for Snowflake Envs
+https://datacoves-vs-code-images.s3.amazonaws.com/snowflake.snowflake-vsc-1.10.5.vsix
+
+SQLTools: I can't find where this came from on github, and it is no longer in Orrum since I deleted it.
+It is used on non-Snowflake envs like Artemis
+
+This is a ChatGPT extension that is NOT our default, but has been added in a few places, like orrum and cold bore. Datacoves co-pilot will make this obsolete
+https://datacoves-vs-code-images.s3.amazonaws.com/timkmecl.chatgpt-1.1.2.vsix
diff --git a/docs/how-tos/connect-to-kenvue-cluster-using-a-bastion.md b/docs/how-tos/connect-to-kenvue-cluster-using-a-bastion.md
new file mode 100644
index 00000000..d5fcdddb
--- /dev/null
+++ b/docs/how-tos/connect-to-kenvue-cluster-using-a-bastion.md
@@ -0,0 +1,79 @@
+# How to connect to the kenvue cluster using a bastion
+
+## SSH to bastion
+
+ssh @AWSWEXNVAL0001.kenvue.com
+
+## Set up your user environment
+
+Install kubectl and aws-iam-authenticator
+
+```
+mkdir bin
+cd bin
+curl -Lo aws-iam-authenticator https://github.com/kubernetes-sigs/aws-iam-authenticator/releases/download/v0.5.9/aws-iam-authenticator_0.5.9_linux_amd64
+chmod +x aws-iam-authenticator
+
+cd ..
+curl -Lo kuberlr.tar.gz https://github.com/flavio/kuberlr/releases/download/v0.4.2/kuberlr_0.4.2_linux_amd64.tar.gz
+tar -xzvf kuberlr.tar.gz
+
+cd kuberlr_0.4.2_linux_amd64/
+mv kuberlr ../bin/
+cd ../bin
+ln -s kuberlr kubectl
+cd ..
+``` + +## Configure your ~/.kube/config + +``` +mkdir .kube +cat << EoF > .kube/config2 +apiVersion: v1 +clusters: +- cluster: + server: https://BD0F1A58014FCF446B668A876EE7DF2A.gr7.us-east-1.eks.amazonaws.com + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRJeU1UQXlOVEV4TlRNMU1Gb1hEVE15TVRBeU1qRXhOVE0xTUZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBT2JpCmFhOUFvSDVlWGpMeFdnQzBONE5JUHVQSVptNmpLNmxBM29sTVAwUHYyd1hlalphcEFsVnFOWVdxcHl3aCtZZm8KT1lLR1Nuc2hPdE9DbnVyU094SVhoY1BnR1ZmN1REVlZGbU04WW5KSzBmOHdLWmxLdDNIYU9oWFJkekNZYkJoMgoydnpZSGx0ZGREbHkvTHpwaWpNQlpNRHY1UUtkeEhNSEF0aUd6aG4xS2xvT2xkRGozV1lpV1VJV0ladzZheWV2CnNhYm1Rd3A1REJwQjBVN3V2bEdMd1RUQ3RZc3NhdnI2dDZ6MWtzNHhNUUMxVTlONUlHV0UxdEUrZGZwMmZzWDYKZ3d1c0tEOGNESkFiVmFrL2lwK3pkcXRxRnJHOVFNeDBEelpQYzRtU1dnVDZyVXZjbTlBbTlrMVNsSXc5ODlGRApHelh6bGxQcXZySWNnU1RWSW9jQ0F3RUFBYU5DTUVBd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0hRWURWUjBPQkJZRUZLNnJEeXBRK3VReGgxWU8zS0JKbmthYU1TNUdNQTBHQ1NxR1NJYjMKRFFFQkN3VUFBNElCQVFCdk52clZjRjFaZ1FDMzNpbDZrR0gzcHJJN3RWRmcvOTF3UVNZZkM2SFM2cWRiVERucwpNYXhoeEYvblZzbFEyKzRmN0UxVUZodUdsOUdUZlVvS2FiQzB1cWx6bUpQaDJVUXJRZ3hZQnd3eGxTOSszcHJNCnlUOGZ5M29uM21jaWR0azZlSllIcm5wZS9QZnlWN1J5eUhva0pVVGIwcWFVakxoMVZHVFoyRmJLK0ZjeG50SHcKdWJ4bnlSMHZlcGExdDFoOVljNDFJYnFzUGRBMVFDZVYvR1hNdWN4Z0U4bUd1VFZQQlU1MEdYbG1qWnRZVjg5dgp3TVpYTVVobzNmakdQNVVnMnlFTmtXaW9Ra2hqUkRMRUZGQXpZUzMrSU5TWnAwMklBUTRRNkNSYnJ0Vmc5ZDFrCkY4d1FzaytJUXUrMnE3T25WOUs5cUdYeXdrakNSd0ZTV1N2UwotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + name: kubernetes +contexts: +- context: + cluster: kubernetes + user: aws + name: aws +- context: + cluster: kubernetes + user: aduser + name: user +current-context: aws +kind: Config +preferences: {} +users: +- name: aws + user: + exec: + apiVersion: client.authentication.k8s.io/v1beta1 + command: aws-iam-authenticator + args: + - "token" + - "-i" + - "itx-wcr-datacove-development" + - "-r" + - "arn:aws:iam::551241293703:role/itx/service/EKS/VPCxEKSRole" +- name: aduser + user: + auth-provider: + config: + apiserver-id: "22f9d484-b818-4b21-a278-00b264446505" + client-id: "22f9d484-b818-4b21-a278-00b264446505" + environment: AzurePublicCloud + tenant-id: "7ba64ac2-8a2b-417e-9b8f-fcf8238f2a56" + name: azure +EoF +``` + +## Connect to cluster + +``` +kubectl get nodes +``` diff --git a/docs/how-tos/custom-dns.md b/docs/how-tos/custom-dns.md new file mode 100644 index 00000000..181a46f9 --- /dev/null +++ b/docs/how-tos/custom-dns.md @@ -0,0 +1,56 @@ +# About this Documentation +Some customers (like Orrum) require a custom internal DNS. This will require adding a new coredns custom config map: + +``` +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns-custom + namespace: kube-system +data: + sftp.orrum.com.server: | + sftp.orrum.com:53 { + forward . 172.31.150.10 172.31.160.20 + } +``` + +Change 'sftp.orrum.com' to whatever pattern needs to go to the custom DNS, and the IP addresses to the addresses of the DNS servers to resolve the address. 
+
+Then you can patch the coredns deployment:
+
+```
+kubectl -n kube-system patch deployment coredns \
+  --type='json' \
+  -p='[
+    {
+      "op": "add",
+      "path": "/spec/template/spec/volumes/-",
+      "value": {
+        "name": "custom-coredns",
+        "configMap": {
+          "name": "coredns-custom"
+        }
+      }
+    },
+    {
+      "op": "add",
+      "path": "/spec/template/spec/containers/0/volumeMounts/-",
+      "value": {
+        "name": "custom-coredns",
+        "mountPath": "/etc/coredns/custom"
+      }
+    }
+  ]'
+```
+
+Then restart the deployment:
+
+```
+kubectl rollout restart deployment coredns -n kube-system
+```
+
+And test with nslookup:
+
+```
+kubectl -n core exec -ti workbench-c6599969b-k4p5w -- nslookup sftp.orrum.com
+```
\ No newline at end of file
diff --git a/docs/how-tos/datacoves-versioning.md b/docs/how-tos/datacoves-versioning.md
new file mode 100644
index 00000000..d97a2bc1
--- /dev/null
+++ b/docs/how-tos/datacoves-versioning.md
@@ -0,0 +1,46 @@
+# Datacoves versioning
+
+We use [semantic versioning](https://semver.org/) in all our images, and datacoves releases.
+
+`MAJOR.MINOR.PATCH`
+
+where `MAJOR.MINOR` are read from `.version.yaml` and used every time a new image is
+pushed to the docker repository, and `PATCH` is autogenerated (timestamp).
+
+## Our criteria
+
+### When do we bump the `MAJOR` version?
+
+When we make incompatible changes or we introduce compatible changes but deprecate features:
+
+- Any python library upgrade (including dbt) that requires changes in the customer's analytics (dbt) git repo
+- Airbyte, Airflow, DataHub, Superset upgrades that require reconfiguration
+- Datacoves core changes that require human intervention
+- Airbyte, Airflow, DataHub, Superset upgrades that do not require reconfiguration, but where several features are being deprecated
+
+### When should we bump the `MINOR` version?
+
+- When we make compatible changes, such as new features or upgraded dependencies
+- Patch version changes to dbt e.g. 1.8.3 to 1.8.5
+- Compatible updates to dbt e.g. 1.7.x to 1.8.x
+- Compatible updates to Airbyte, Airflow, DataHub, Superset that do not require reconfiguration
+
+### Everything else is a `PATCH`
+
+- Bug fixes, performance enhancements
+
+## Image tags
+
+Images are pushed with the following tags:
+
+- MAJOR
+- MAJOR.MINOR
+- MAJOR.MINOR.PATCH
+- MAJOR.MINOR.PATCH-\
+- latest
+
+CI servers that eventually use datacoves images could reference any of them, depending on how specific they need to be.
+
+## Releases
+
+Releases follow the same versioning criteria; they are generated by running the `./cli.py generate_release` command, or by triggering the `Generate Release` GitHub workflow.
diff --git a/docs/how-tos/debug-airflow-workers.md b/docs/how-tos/debug-airflow-workers.md
new file mode 100644
index 00000000..3f1d5134
--- /dev/null
+++ b/docs/how-tos/debug-airflow-workers.md
@@ -0,0 +1,56 @@
+# Debug Airflow Workers
+
+## How to review if there are errors in git-sync/s3-sync containers?
+
+We have already enabled the functionality in `git-sync` to retry a maximum of three times. If the synchronization with `git-sync` or `s3-sync` is not successful, the worker will fail, and therefore the Airflow task will also fail.
+
+To get the logs from `git-sync` or `s3-sync` we need to filter by namespace and by container. Below are some examples of how to do it.
+
+1. Go to **Grafana**, e.g. `https://grafana.`
+2. Go to **Explore**, select the `Loki` datasource and perform the query with the following filters:
+
+- **Namespace** = *dcw-my-slug-environment*
+- **Container** =~ *git-sync* / *s3-sync*
+
+Examples:
+
+```bash
+# git-sync
+{namespace="dcw-dnr240", container="git-sync"} |= ``
+
+# s3-sync
+{namespace="dcw-dnr240", container="s3-sync"} |= ``
+```
+
+![Find log for git-sync or s3-sync](img/debug-airflow-workers-3-min.png)
+
+## How to get Airflow workers?
+
+1. Go to **Grafana**, e.g. `https://grafana.`
+2. Go to **Explore**, select the `Loki` datasource and perform the query with the following filters:
+
+- **Namespace** = *dcw-my-slug-environment*
+- **Pod** =~ *my-slug-environment-airflow-scheduler.**
+- **Line contains** |= *my-task*
+
+*Note: Remember that you have to adjust the date and time parameters depending on the search you want to perform.*
+
+E.g.:
+```bash
+{namespace="dcw-prd001", pod=~"prd001-airflow-scheduler.*"} |= `t_id_MDM_extraction_V_ENS2_SALES_ADJUSTMENTS_streamsets`
+```
+
+![Find pod by task](img/debug-airflow-workers-1-min.png)
+
+3. Copy the pod name
+4. Go to **Explore**, select `Loki` and perform the query with the following filters:
+
+- **Namespace** = *dcw-my-slug-environment*
+- **Pod** = *pod-name*
+
+E.g.:
+```bash
+{namespace="dcw-prd001", pod="emeaelmdmprdtidmdmextractionve-295567f106ff46139ad4edf24e52fc31"} |= ``
+```
+
+![Find pod by name](img/debug-airflow-workers-2-min.png)
diff --git a/docs/how-tos/debug-dbt-errors-when-return-code-is-not-zero.md b/docs/how-tos/debug-dbt-errors-when-return-code-is-not-zero.md
new file mode 100644
index 00000000..8048f264
--- /dev/null
+++ b/docs/how-tos/debug-dbt-errors-when-return-code-is-not-zero.md
@@ -0,0 +1,32 @@
+## How to debug dbt on production environments, i.e. Airflow?
+
+Sometimes when you run a dbt command on the command line, e.g. `dbt deps`, `dbt compile`, there are silent errors, and you just get a return code > 0.
+
+To debug it, you should run it programmatically using python:
+
+### Run python in the command line
+
+```sh
+$ python
+```
+
+### Run the desired command right in the python console
+
+```python
+from dbt.cli.main import dbtRunner, dbtRunnerResult
+
+# initialize
+dbt = dbtRunner()
+
+# create CLI args as a list of strings
+cli_args = ["deps"]
+
+# run the command
+res: dbtRunnerResult = dbt.invoke(cli_args)
+
+# inspect the results
+for r in res.result:
+    print(f"{r.node.name}: {r.status}")
+```
+
+To know more, see https://docs.getdbt.com/reference/programmatic-invocations.
\ No newline at end of file
diff --git a/docs/how-tos/grafana-grant-permisions.md b/docs/how-tos/grafana-grant-permisions.md
new file mode 100644
index 00000000..189de8b1
--- /dev/null
+++ b/docs/how-tos/grafana-grant-permisions.md
@@ -0,0 +1,9 @@
+# Grant permissions to Grafana
+
+1. Go to [Django admin groups](https://api.datacoveslocal.com/panel/auth/group/).
+2. Edit a group that has your user.
+3. Search the `Grafana` permissions and `Choose all` (see image).
+4. Save the group.
+5. Go to [Grafana](https://grafana.datacoveslocal.com/)
+
+![Grant permissions in Grafana](img/grant_permissions_in_grafana.png)
diff --git a/docs/how-tos/grafana-loki-storage-config-providers.md b/docs/how-tos/grafana-loki-storage-config-providers.md
new file mode 100644
index 00000000..2bd45448
--- /dev/null
+++ b/docs/how-tos/grafana-loki-storage-config-providers.md
@@ -0,0 +1,110 @@
+# Grafana Loki Storage
+
+## Providers
+
+- [AWS S3](#aws-s3)
+- [Azure Blob Storage](#azure-blob-storage)
+
+
+## AWS S3
+
+### Permission
+
+Limited: List, Read, Write
+
+```json
+{
+    "Version": "2012-10-17",
+    "Statement": [
+        {
+            "Effect": "Allow",
+            "Action": [
+                "s3:PutObject",
+                "s3:GetObject",
+                "s3:GetObjectVersion",
+                "s3:DeleteObject",
+                "s3:DeleteObjectVersion"
+            ],
+            "Resource": ".../*"
+        },
+        {
+            "Effect": "Allow",
+            "Action": "s3:ListBucket",
+            "Resource": "..."
+        }
+    ]
+}
+```
+
+### Create and Configure Life Cycle
+
+1. Find and select **S3 Services**.
+2. Click on **Create Bucket**.
+3. In `General configuration`, choose the region and the name of the bucket. Other values can be defaulted.
+4. We need to create two `Lifecycle rules` to rotate our logs.
+5. Select the new bucket and then select the `Management` tab.
+6. Click `Create lifecycle rule`.
+7. In `Lifecycle rule configuration`, fill in the name, e.g. `Delete all fake objects after 30 days`.
+8. In **Prefix**, fill in `fake/`.
+9. In **Lifecycle rule actions**, check `Expire current versions of objects`.
+10. In **Days after object creation**, enter `30`.
+11. Save changes.
+12. In `Lifecycle rule configuration`, fill in the name, e.g. `Delete all index objects after 30 days`.
+13. In **Prefix**, fill in `index/`.
+14. In **Lifecycle rule actions**, check `Expire current versions of objects`.
+15. In **Days after object creation**, enter `30`.
+16. Save changes.
+
+*We must end up with two rules.*
+
+![Lifecycle rule configuration](img/loki-aws-3-min.png)
+
+*Example of a rule.*
+
+![Lifecycle rule configuration](img/loki-aws-1-min.png)
+
+![Lifecycle rule configuration](img/loki-aws-2-min.png)
+
+## Azure Blob Storage
+
+### Create and configure Azure Blob Storage
+
+1. Create a new resource of type `Storage account`.
+2. Select your **Subscription** and **Resource group**.
+3. Complete the `Storage account name`.
+4. Click **Review** (other values can be defaulted).
+5. Click **Create** (other values can be defaulted).
+
+![Azure blob storage account](img/loki-azure-1-min.png)
+
+6. Select your new `Storage account`.
+7. Click on **Containers** and add a new container.
+
+![Azure blob storage storage container](img/loki-azure-6-min.png)
+
+![Azure blob storage storage container](img/loki-azure-2-min.png)
+
+8. Select `Lifecycle management` and `Add a rule` to create a new rule to rotate our logs.
+
+![Azure blob storage storage container](img/loki-azure-3-min.png)
+
+9. On the **Details** tab, complete the name (Delete all objects after 30 days) and select `Limit blobs with filter`.
+
+![Azure blob storage storage lifecycle](img/loki-azure-4-min.png)
+
+10. On the **Filter set** tab, add two **Blob prefix** entries: `/fake/` and `/index/`.
+
+![Azure blob storage storage lifecycle](img/loki-azure-5-min.png)
+
+11. Click **Create**.
+
+### Get configuration data
+
+1. **Account name** is the name of the `storage account`.
+2. Click on **Account key** (Key1)
+
+![Azure blob storage account key](img/loki-azure-7-min.png)
+
+3.
Select your **Container** and then **Properties** + +![Container properties](img/loki-azure-8-min.png) diff --git a/docs/how-tos/grafana-loki-storage-config.md b/docs/how-tos/grafana-loki-storage-config.md new file mode 100644 index 00000000..982f4077 --- /dev/null +++ b/docs/how-tos/grafana-loki-storage-config.md @@ -0,0 +1,51 @@ +# Grafana Loki Storage Configuration + +There are three different providers to configure **Loki** storage: + +- AWS S3 +- Azure Blob Storage +- Minio (Local development) + +## Notes + + - Minio is not responsible for log rotation, the logs lifecycle must be configured in your provider. + - How to configure the provider? [here](grafana-loki-storage-config-providers.md) + +To configure the cluster you must add the configuration to the configuration repository as a secret in `/cluster-params.secret.yaml` +for example to our local environment `datacoveslocal.com/cluster-params.secret.yaml` + +## Minio (Local development) + +```shell +grafana: + ... + loki: + provider: minio + password: ... +``` + +## AWS S3 + +```shell +grafana: + ... + loki: + provider: aws + region: + access_key: ... + secret_key: ... + bucket: +``` + +## Azure Blob Storage + +```shell +grafana: + ... + loki: + provider: azure + account_name: ... + account_key: ... + container_name: + endpoint_suffix: +``` diff --git a/docs/how-tos/hotfix.md b/docs/how-tos/hotfix.md new file mode 100644 index 00000000..2481ab69 --- /dev/null +++ b/docs/how-tos/hotfix.md @@ -0,0 +1,36 @@ +## How to Create a Hotfix + +A hotfix is defined as doing a targetted fix to an existing release. The idea behind a hotfix is to do the absolute minimum change to correct a high priority issue in a live release. + +To create a hotfix, one must first do the fix. First, create a branch from the release tag you wish to hot fix. Let's say you're hot-fixing release 'TAG_NAME'. You would do the following commands: + +``` +git fetch --all --tags +git checkout -b BRANCH_NAME refs/tags/TAG_NAME +``` + +You will now have a branch that is a copy of the release tag. You can either do your hotfix work directly on that branch and merge it to main later, or you can use `git cherry-pick` to pick commits from the main branch onto your new branch. If you need to use cherrypick and you don't know how, that is a larger topic than I want to cover here; Stephen can help you directly with that. + +Once you have done your work, you should **commit** to your branch and then compare your branch to the original tag. This will make sure you only changed what was needed: + +``` +git diff BRANCH_NAME..refs/tags/TAG_NAME +``` + +This command **is very important if you cherry-pick** to make sure you don't accidentally bring additional features or code that you do not intend to. However, it is good practice to review all code going into a hotfix very carefully. + +Once you are certain your hotfix is good, **push** it to the git repository. Now you're ready to build a hotfix release with cli.py. Do the following command: + +``` +./cli.py generate_hotfix +``` + +It will first show you `git status` to make sure your code is committed. Make sure there are no extra files or anything you don't want built into the release docker image present in your code tree. + +After you confirm, it will ask you which release you are making a hotfix from. This release must already be present in your `releases/` directory; if it is not, download the release with `./cli.py download_releases` or download the appropriate manifest directly from github. 
+ +Then, it will ask you which images you wish to build. Select one or more images to build, or none if you are changing another dependency. + +After that, it will ask you if you want to change the version of any other image that is in the release. You can select none if you only want to build new images and you don't need to change any other dependencies. + +Finally, it will build your release and push it up as a draft in github. From that point, it is a normal release and you can take it through the normal process to get it installed. diff --git a/docs/how-tos/how-to-create-a-ssl-certificate.md b/docs/how-tos/how-to-create-a-ssl-certificate.md new file mode 100644 index 00000000..11a43eda --- /dev/null +++ b/docs/how-tos/how-to-create-a-ssl-certificate.md @@ -0,0 +1,27 @@ +# How to create an SSL certificate + +1. Install [acme.sh](https://github.com/acmesh-official/acme.sh#1-how-to-install) + +2. Configure the [cloudflare API token](https://github.com/acmesh-official/acme.sh/wiki/dnsapi#1-cloudflare-option) (getting `CF_Key` and `CF_Email` from 1Password). + +3. Run: + +```shell +# Let's Encrypt issuer +# https://github.com/acmesh-official/acme.sh/wiki/Server +acme.sh --issue --server letsencrypt --dns dns_cf -d --debug 2 + +# then +acme.sh --issue --server letsencrypt --dns dns_cf -d '*.' --debug 2 +``` + +4. Get certificate information (Optional) + +```shell +openssl x509 -text -noout -in +``` + +5. Copy ceftificates + +- Use `/fullchain.cer` and `/.key` as the root certificate and private key. Usually copied then to `base/root.cer` and `base/root.key`. +- Also, use `*./fullchain.cer` and `*./.key` as the wildcard certificate and private key. Usually copied then to `base/wildcard.cer` and `base/wildcard.key`. diff --git a/docs/how-tos/image.png b/docs/how-tos/image.png new file mode 100644 index 00000000..e1ee1f1c Binary files /dev/null and b/docs/how-tos/image.png differ diff --git a/docs/how-tos/img/add-credit-to-account.png b/docs/how-tos/img/add-credit-to-account.png new file mode 100644 index 00000000..2c97758e Binary files /dev/null and b/docs/how-tos/img/add-credit-to-account.png differ diff --git a/docs/how-tos/img/add-group-to-token.png b/docs/how-tos/img/add-group-to-token.png new file mode 100644 index 00000000..e9942f9d Binary files /dev/null and b/docs/how-tos/img/add-group-to-token.png differ diff --git a/docs/how-tos/img/admin-consent.png b/docs/how-tos/img/admin-consent.png new file mode 100644 index 00000000..7d26f30f Binary files /dev/null and b/docs/how-tos/img/admin-consent.png differ diff --git a/docs/how-tos/img/app-permissions-group.png b/docs/how-tos/img/app-permissions-group.png new file mode 100644 index 00000000..1b8f9e4d Binary files /dev/null and b/docs/how-tos/img/app-permissions-group.png differ diff --git a/docs/how-tos/img/app-permissions.png b/docs/how-tos/img/app-permissions.png new file mode 100644 index 00000000..ab64a7f4 Binary files /dev/null and b/docs/how-tos/img/app-permissions.png differ diff --git a/docs/how-tos/img/app-registrations.png b/docs/how-tos/img/app-registrations.png new file mode 100644 index 00000000..bbc974de Binary files /dev/null and b/docs/how-tos/img/app-registrations.png differ diff --git a/docs/how-tos/img/change-environment-profile.png b/docs/how-tos/img/change-environment-profile.png new file mode 100644 index 00000000..b30f33d7 Binary files /dev/null and b/docs/how-tos/img/change-environment-profile.png differ diff --git a/docs/how-tos/img/client-id-tentant-id.png b/docs/how-tos/img/client-id-tentant-id.png new file 
mode 100644 index 00000000..c4ac21bd Binary files /dev/null and b/docs/how-tos/img/client-id-tentant-id.png differ diff --git a/docs/how-tos/img/client-secret.png b/docs/how-tos/img/client-secret.png new file mode 100644 index 00000000..9296af2e Binary files /dev/null and b/docs/how-tos/img/client-secret.png differ diff --git a/docs/how-tos/img/cloud-pc.jpg b/docs/how-tos/img/cloud-pc.jpg new file mode 100644 index 00000000..0bffb81d Binary files /dev/null and b/docs/how-tos/img/cloud-pc.jpg differ diff --git a/docs/how-tos/img/create-subscription.png b/docs/how-tos/img/create-subscription.png new file mode 100644 index 00000000..2a5a2a12 Binary files /dev/null and b/docs/how-tos/img/create-subscription.png differ diff --git a/docs/how-tos/img/debug-airflow-workers-1-min.png b/docs/how-tos/img/debug-airflow-workers-1-min.png new file mode 100644 index 00000000..bfad16f3 Binary files /dev/null and b/docs/how-tos/img/debug-airflow-workers-1-min.png differ diff --git a/docs/how-tos/img/debug-airflow-workers-2-min.png b/docs/how-tos/img/debug-airflow-workers-2-min.png new file mode 100644 index 00000000..c44129bd Binary files /dev/null and b/docs/how-tos/img/debug-airflow-workers-2-min.png differ diff --git a/docs/how-tos/img/debug-airflow-workers-3-min.png b/docs/how-tos/img/debug-airflow-workers-3-min.png new file mode 100644 index 00000000..435681a9 Binary files /dev/null and b/docs/how-tos/img/debug-airflow-workers-3-min.png differ diff --git a/docs/how-tos/img/grant_permissions_in_grafana.png b/docs/how-tos/img/grant_permissions_in_grafana.png new file mode 100644 index 00000000..32b81e40 Binary files /dev/null and b/docs/how-tos/img/grant_permissions_in_grafana.png differ diff --git a/docs/how-tos/img/loki-aws-1-min.png b/docs/how-tos/img/loki-aws-1-min.png new file mode 100644 index 00000000..59ee49b2 Binary files /dev/null and b/docs/how-tos/img/loki-aws-1-min.png differ diff --git a/docs/how-tos/img/loki-aws-2-min.png b/docs/how-tos/img/loki-aws-2-min.png new file mode 100644 index 00000000..58b55aa5 Binary files /dev/null and b/docs/how-tos/img/loki-aws-2-min.png differ diff --git a/docs/how-tos/img/loki-aws-3-min.png b/docs/how-tos/img/loki-aws-3-min.png new file mode 100644 index 00000000..ee815dc9 Binary files /dev/null and b/docs/how-tos/img/loki-aws-3-min.png differ diff --git a/docs/how-tos/img/loki-azure-1-min.png b/docs/how-tos/img/loki-azure-1-min.png new file mode 100644 index 00000000..1f28ac4e Binary files /dev/null and b/docs/how-tos/img/loki-azure-1-min.png differ diff --git a/docs/how-tos/img/loki-azure-2-min.png b/docs/how-tos/img/loki-azure-2-min.png new file mode 100644 index 00000000..ede3a050 Binary files /dev/null and b/docs/how-tos/img/loki-azure-2-min.png differ diff --git a/docs/how-tos/img/loki-azure-3-min.png b/docs/how-tos/img/loki-azure-3-min.png new file mode 100644 index 00000000..c9b1834d Binary files /dev/null and b/docs/how-tos/img/loki-azure-3-min.png differ diff --git a/docs/how-tos/img/loki-azure-4-min.png b/docs/how-tos/img/loki-azure-4-min.png new file mode 100644 index 00000000..e3156e55 Binary files /dev/null and b/docs/how-tos/img/loki-azure-4-min.png differ diff --git a/docs/how-tos/img/loki-azure-5-min.png b/docs/how-tos/img/loki-azure-5-min.png new file mode 100644 index 00000000..00bec187 Binary files /dev/null and b/docs/how-tos/img/loki-azure-5-min.png differ diff --git a/docs/how-tos/img/loki-azure-6-min.png b/docs/how-tos/img/loki-azure-6-min.png new file mode 100644 index 00000000..92253f8f Binary files /dev/null and 
b/docs/how-tos/img/loki-azure-6-min.png differ
diff --git a/docs/how-tos/img/loki-azure-7-min.png b/docs/how-tos/img/loki-azure-7-min.png
new file mode 100644
index 00000000..c689def3
Binary files /dev/null and b/docs/how-tos/img/loki-azure-7-min.png differ
diff --git a/docs/how-tos/img/loki-azure-8-min.png b/docs/how-tos/img/loki-azure-8-min.png
new file mode 100644
index 00000000..e78eb676
Binary files /dev/null and b/docs/how-tos/img/loki-azure-8-min.png differ
diff --git a/docs/how-tos/img/recovery-pv-get-subcription-id-min.png b/docs/how-tos/img/recovery-pv-get-subcription-id-min.png
new file mode 100644
index 00000000..95ea2ce5
Binary files /dev/null and b/docs/how-tos/img/recovery-pv-get-subcription-id-min.png differ
diff --git a/docs/how-tos/img/recovery-pv-min.png b/docs/how-tos/img/recovery-pv-min.png
new file mode 100644
index 00000000..9fff01c5
Binary files /dev/null and b/docs/how-tos/img/recovery-pv-min.png differ
diff --git a/docs/how-tos/img/register-new-app.png b/docs/how-tos/img/register-new-app.png
new file mode 100644
index 00000000..bb9d112d
Binary files /dev/null and b/docs/how-tos/img/register-new-app.png differ
diff --git a/docs/how-tos/img/setup-wizard.png b/docs/how-tos/img/setup-wizard.png
new file mode 100644
index 00000000..0711308e
Binary files /dev/null and b/docs/how-tos/img/setup-wizard.png differ
diff --git a/docs/how-tos/img/subscription-metadata.png b/docs/how-tos/img/subscription-metadata.png
new file mode 100644
index 00000000..a6b42550
Binary files /dev/null and b/docs/how-tos/img/subscription-metadata.png differ
diff --git a/docs/how-tos/img/token-configuration.png b/docs/how-tos/img/token-configuration.png
new file mode 100644
index 00000000..e1a20c8d
Binary files /dev/null and b/docs/how-tos/img/token-configuration.png differ
diff --git a/docs/how-tos/install-python-reqs-on-jnj-bastion.md b/docs/how-tos/install-python-reqs-on-jnj-bastion.md
new file mode 100644
index 00000000..05d80e84
--- /dev/null
+++ b/docs/how-tos/install-python-reqs-on-jnj-bastion.md
@@ -0,0 +1,11 @@
+# Install python requirements on bastion in JNJ
+
+```
+wget --no-check-certificate https://bootstrap.pypa.io/pip/3.6/get-pip.py && python3 get-pip.py --user
+```
+
+Then, cd into the datacoves_deployment cloned repo folder, and run:
+
+```
+pip install -r requirements.txt
+```
diff --git a/docs/how-tos/list-code-server-pods-processes.md b/docs/how-tos/list-code-server-pods-processes.md
new file mode 100644
index 00000000..fd0bb7a1
--- /dev/null
+++ b/docs/how-tos/list-code-server-pods-processes.md
@@ -0,0 +1,10 @@
+# List python processes running on certain namespace's code server pods
+
+```bash
+#!/bin/bash
+# Namespace to inspect
+ns="dcw-dev001"
+# Collect the code-server pod names in that namespace
+pods=$(kubectl -n $ns get pods | grep code-server | awk '{print $1}')
+for pod in $pods; do
+    kubectl -n $ns exec -ti $pod -- bash -c 'ps auxwf' | grep python
+done
+```
diff --git a/docs/how-tos/make-and-install-a-release.md b/docs/how-tos/make-and-install-a-release.md
new file mode 100644
index 00000000..f9ff3ea6
--- /dev/null
+++ b/docs/how-tos/make-and-install-a-release.md
@@ -0,0 +1,75 @@
+## Make a new release
+
+To make a new release, from your development machine:
+
+```bash
+cluster_domain=ensembletest.apps.jnj.com
+
+# Generate a new release.
+git checkout main
+git pull
+
+# Check that images are properly created in Github Actions
+./cli.py generate_release
+release=     # The name of the release just generated.
+
+# [If release is targeted to a submodule customer]
+# Check if there's any config change requirement
+./cli.py combined_release_notes   # Inspect the output to check for configuration changes
+
+# Update the cluster configuration to reference the new release.
+./cli.py set_release
+cd config/$cluster_domain/
+git secret reveal -f                  # Only required if you modified secrets.
+change configuration as required      # Only required if you modified secrets.
+git secret hide                       # Only required if you modified secrets.
+git add -A
+git diff --cached                     # Review what will be committed.
+git commit
+git push
+
+# Commit and push the changes to datacoves.
+cd ../..
+git add -A
+git diff --cached
+git commit
+git push
+```
+
+## Apply the release to a cluster
+
+### Localhost
+
+```bash
+./cli.py install
+```
+
+### JNJ
+
+For jnj there's a git repository, datacoves_deployment, that mirrors the structure of
+the datacoves repo but only contains scripts and configuration, not sources.
+
+To deploy, first update the mirror:
+
+```bash
+# Clone if needed.
+mkdir -p ../jnj/asx-ahrx/datacoves_deployment
+git clone ssh://git@sourcecode.jnj.com:3268/asx-ahrx/datacoves_deployment.git ../jnj/asx-ahrx/datacoves_deployment
+
+# Rsync the installer files into the datacoves_deployment repo
+./cli.py rsync_installer ../jnj/asx-ahrx/datacoves_deployment/
+
+# Point the config submodule to the latest version.
+cd config/$cluster_domain/
+git pull
+cd ../..
+
+# Commit the changes.
+git add -A
+git diff --cached
+git commit
+```
+
+SSH into a jnj machine with kubectl access to the cluster. Then follow
+[datacoves_deployment](https://sourcecode.jnj.com/projects/ASX-AHRX/repos/datacoves_deployment/browse)'s
+[documentation](../client-docs/jnj/5-deployment.md) to run the installation scripts.
diff --git a/docs/how-tos/manage-profiles-and-image-sets.md b/docs/how-tos/manage-profiles-and-image-sets.md
new file mode 100644
index 00000000..fa907d9d
--- /dev/null
+++ b/docs/how-tos/manage-profiles-and-image-sets.md
@@ -0,0 +1,35 @@
+# Managing profiles and image sets
+
+## How to create and use a profile + image set?
+
+### 1. Create profile
+
+A profile is used to create a reusable preconfigured environment.
+
+1. Navigate to the [profiles admin page](https://api.east-us-a.datacoves.com/panel/projects/profile/) and create a new one by clicking on "Add Profile".
+2. Review the checkboxes and uncheck the ones that are not appropriate; you might like to keep them all checked as suggested.
+3. Add profile files accordingly. You might like to copy the exact same profile files configured on the [default profile](https://api.east-us-a.datacoves.com/panel/projects/profile/1/change/).
+
+### 2. Create image set
+
+Image sets are associated with profiles, and they are used to build the images that will end up being used by code-server and/or airflow.
+
+1. Navigate to the [Image set admin page](https://api.east-us-a.datacoves.com/panel/projects/profileimageset/) and click on "Create new image set".
+2. Choose the profile you just created in `Profile`.
+3. Choose the release the new images are going to be based on, typically the latest release.
+4. Set the common python requirements for both airflow and code-server images in the `Python requirements` field. Take a look at the help text under the field.
+5. Set the specific python requirements for airflow or code server in the fields `Airflow requirements` or `Code server requirements`.
+6. Finally, configure the extensions you need installed in code-server by adding urls to the .vsix files in the `Code server extensions` field.
+7. Hit "Save and continue editing".
+8. Click on the "Build image set" button in the top right corner of the form. A background process will be triggered to build the images.
+9. Keep refreshing the page every minute or so until the `Images` field gets populated with the final images.
+
+### 3. Start using your profile
+
+Once your profile and image set are ready, edit the environment you want to change and set the corresponding profile in the `Profile` field. Environments are edited [here](https://api.east-us-a.datacoves.com/panel/projects/environment/).
+
+![Change environment profile](img/change-environment-profile.png)
+
+### 4. Reload the workbench page
+
+That's all, reload the page and don't forget to prepare your [mate]() to enjoy your analytics journey even more ;)
diff --git a/docs/how-tos/move-a-gpg-secret-key.md b/docs/how-tos/move-a-gpg-secret-key.md
new file mode 100644
index 00000000..702a15a0
--- /dev/null
+++ b/docs/how-tos/move-a-gpg-secret-key.md
@@ -0,0 +1,24 @@
+# How to move a gpg secret key
+
+You should not reuse private gpg keys without thinking. However, it is more
+convenient to have a single private key for your jnj email that is in all the
+git secret keyrings of all the cluster config repos that you have access to.
+
+An easy way to transfer a key to a new installation server is to copy and paste
+its base64:
+
+```bash
+# From the machine that already has the key:
+gpg --list-secret-keys
+gpg --export-secret-key youremail@its.jnj.com | base64
+# Copy the output.
+```
+
+```bash
+# From the installation machine:
+cat | base64 -d > key.asc
+# Paste and hit control D.
+gpg --import key.asc
+gpg --list-secret-keys
+rm key.asc
+```
diff --git a/docs/how-tos/onboard-a-new-project-on-datacoves.md b/docs/how-tos/onboard-a-new-project-on-datacoves.md
new file mode 100644
index 00000000..215f128b
--- /dev/null
+++ b/docs/how-tos/onboard-a-new-project-on-datacoves.md
@@ -0,0 +1,59 @@
+## 1. Create service accounts on snowflake (manually)
+
+- svc_datacoves: to change user private keys
+- svc_orchestration: airflow jobs
+- svc_loader: airbyte/fivetran jobs
+- svc_continuous_integration: CI jobs
+- svc_business_intelligence: BI tool connection (optional)
+- svc_business_intelligence_pii: BI tool connection for PII data (optional)
+
+## 2. Create user accounts on snowflake (manually)
+
+## 3. New project on appdevtools (on JnJ):
+
+- Bitbucket
+- Jenkins
+- Confluence
+
+## 4. Configure git service account access to repo
+
+## 5. Add SQL hook and template to set users' private keys on snowflake
+
+## 6. Create git repo structure using balboa repo as a reference:
+
+- load
+- orchestrate
+- automate
+- dbt
+- profiles.yml
+- sample_blue_green.py
+- docs
+- secure
+- .gitignore
+
+Depending on CI:
+
+- .github
+- .gitlab-ci.yml
+- Jenkinsfile
+
+CI job that deploys to prod:
+
+- generate dbt docs on dbt-docs branch
+- runs dbt build on prod
+
+CI job on PR that:
+
+- validates branch names
+- runs pre-commit hooks
+
+## 7. Add airbyte connection on airflow
+
+## 8. Add new branch "airflow\_" for every env that is not `production`
+
+## 9. New dbt-docs branch
+
+## 10. Jenkins configuration
+
+- Git SA
+- Snowflake SA
+
+## 11. Enable dbt-docs once index.html has been placed on the dbt-docs branch
diff --git a/docs/how-tos/prometheus-queries.md b/docs/how-tos/prometheus-queries.md
new file mode 100644
index 00000000..78eaf801
--- /dev/null
+++ b/docs/how-tos/prometheus-queries.md
@@ -0,0 +1,45 @@
+# Useful prometheus queries
+
+## node status with pressure
+
+```promql
+sum by(node) (kube_node_status_condition{status="true", condition="DiskPressure"})
+
+sum by(node) (kube_node_status_condition{status="true", condition="MemoryPressure"})
+
+sum by(node) (kube_node_status_condition{status="true", condition="PIDPressure"})
+```
+
+## pods memory filtering by pod name with regex
+
+```promql
+sum by(pod) (container_memory_usage_bytes{namespace="", pod=~".*"})
+```
+
+## containers cpu usage by node
+
+```promql
+sum by(node) (rate(container_cpu_usage_seconds_total{node=""}[5m]))
+```
+
+## Node memory
+
+```promql
+node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100
+```
+
+## Loki ingester chunk stored size
+
+```promql
+loki_ingester_chunk_stored_bytes_total{job="loki"}
+```
+
+## Pods killed for exceeding their memory limit
+
+```promql
+sum by(pod) (kube_pod_container_status_terminated_reason{reason="OOMKilled", namespace="dcw-prd001"})
+```
+
+## Total worker nodes (measured by nodes running airflow worker pods)
+
+```promql
+count (sum by (node) (kube_pod_info and on (pod) kube_pod_labels{label_airflow_worker!=""}) > 0)
+```
\ No newline at end of file
diff --git a/docs/how-tos/q-and-a.md b/docs/how-tos/q-and-a.md
new file mode 100644
index 00000000..ed828e0b
--- /dev/null
+++ b/docs/how-tos/q-and-a.md
@@ -0,0 +1,15 @@
+## Questions and Answers
+
+These are simple items that don't necessarily fit in elsewhere or need their own articles.
+
+### How do I start codeserver without validating the git repository credentials?
+
+Code servers use User Repository settings, and currently User Repositories only work with SSH keys. Sometimes this is hard to deal with, for instance if we can only use https authentication (e.g. from within J&J pulling an external repository), and we need a work-around.
+
+The workaround is simple; go to the Django panel.
+
+Pick User Repositories.
+
+Pick the correct User Repository for your user and repo.
+
+Put a date and time in the "validated at" field and save it. So long as that isn't blank, it will allow you to start code server.
diff --git a/docs/how-tos/recover-disk-on-aks.md b/docs/how-tos/recover-disk-on-aks.md
new file mode 100644
index 00000000..5bd0c178
--- /dev/null
+++ b/docs/how-tos/recover-disk-on-aks.md
@@ -0,0 +1,136 @@
+# Recover disk (PV) from Azure Kubernetes Service
+
+This guide describes how to move a disk from one Kubernetes cluster to another cluster. [More info](https://learn.microsoft.com/en-us/azure/aks/csi-disk-move-subscriptions)
+
+Steps:
+
+1. [Edit old pvc to Retain policy.](#edit-old-pvc-to-retain-policy)
+2. [Get PV name.](#get-pv-name)
+3. [Delete PVC to release the PV in the old cluster.](#delete-pvc-to-release-the-pv-in-the-old-cluster)
+4. [Move the PV resource to new cluster using az cli.](#move-the-pv-resource-to-new-cluster-using-az-cli)
+5. [Delete the PVC in the new cluster.](#delete-the-pvc-in-the-new-cluster)
+6. [Create the PV and PVC in the new cluster.](#create-the-pv-and-pvc-in-the-new-cluster)
+
+## Edit old pvc to Retain policy
+
+The **persistent volumes (PV)** created for **code server** have the `Delete` policy, which means that when a disk is unbound it is automatically deleted; therefore this policy must be modified to `Retain`.
+
+```shell
+# Get the persistent volumes. E.g:
+kubectl get pv
+
+# Edit the persistent volume. E.g:
+kubectl patch pv pvc-2552cd9b-8231-409d-8b4b-a9d047415b53 -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'
+```
+
+## Get PV name
+
+```shell
+# Get the persistent volumes. E.g:
+kubectl get pv
+```
+
+## Delete PVC to release the PV in the old cluster
+
+It is necessary to remove the **persistent volume claim (PVC)** to release the **persistent volume (PV)**.
+
+```shell
+# Get the persistent volume claims. E.g:
+kubectl -n dcw-dev123 get pvc
+
+# Delete the persistent volume claim. E.g:
+kubectl -n dcw-dev123 delete pvc code-server-bru-10-config-volume
+```
+
+## Move the PV resource to new cluster using az cli
+
+1. Get the `cluster name` and `subscription id`.
+
+![Get the subscription id](img/recovery-pv-get-subcription-id-min.png)
+
+2. Get the node resource groups. We will need the origin and destination.
+
+```shell
+# Get the node resource group
+az aks show --resource-group --name --query nodeResourceGroup -o tsv
+```
+
+3. Get the disk id.
+
+```shell
+# List the disks in the origin node resource group. E.g:
+az disk list --resource-group
+```
+
+![Disk list](img/recovery-pv-min.png)
+
+4. Move the disk.
+
+```shell
+az resource invoke-action --action moveResources --ids "/subscriptions//resourceGroups/" --request-body "{ \"resources\": [\"\"],\"targetResourceGroup\":\"/subscriptions//resourceGroups/\" }"
+```
+
+## Delete the PVC in the new cluster
+
+This step is only necessary if the **persistent volume claim (PVC)** already exists.
+
+```shell
+# Get the persistent volume claims. E.g:
+kubectl -n dcw-dev123 get pvc
+
+# Delete the persistent volume claim. E.g:
+kubectl -n dcw-dev123 delete pvc code-server-bru-10-config-volume
+```
+
+## Create the PV and PVC in the new cluster
+
+Create the following file `pvc.yaml` with the correct names and namespace.
+
+- **pv-name**: E.g: `pvc-2581bfb0-b56a-4fbd-b302-67cf0ab43499`
+- **pvc-name**: If you deleted the pvc, the name should be the same. E.g: `code-server-bru-10-config-volume`
+- **namespace**: Kubernetes namespace where the resources will be created.
+- **disk-id-full-path**: E.g: `/subscriptions/91bd2205-0d74-42c9-86ad-41cca1b4822b/resourceGroups/MC_datacoves_east-us-a_eastus/providers/Microsoft.Compute/disks/pvc-fddcd2fc-7d35-40e9-b631-49c64bd87cbf`
+
+```yaml
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name:
+spec:
+  capacity:
+    storage: 20Gi
+  accessModes:
+    - ReadWriteOnce
+  persistentVolumeReclaimPolicy: Retain
+  storageClassName: default
+  csi:
+    driver: disk.csi.azure.com
+    readOnly: false
+    volumeHandle:
+
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name:
+  namespace:
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 20Gi
+  volumeName:
+  storageClassName: default
+
+```
+
+Create the resources in Kubernetes:
+
+```shell
+kubectl apply -f pvc.yaml
+
+# Check the resources
+kubectl get pvc | grep     # pvc-2552cd9b-8231-409d-8b4b-a9d047415b53
+kubectl -n dcw-dev123 get pvc code-server-bru-10-config-volume
+```
\ No newline at end of file
diff --git a/docs/how-tos/register-github-self-hosted-runner.md b/docs/how-tos/register-github-self-hosted-runner.md
new file mode 100644
index 00000000..e9c4970d
--- /dev/null
+++ b/docs/how-tos/register-github-self-hosted-runner.md
@@ -0,0 +1,46 @@
+# Self hosted Github Runner
+1. Create a new runner [in Github](https://github.com/datacoves/datacoves/settings/actions/runners). You must have `Owner` privileges.
+2. Create a virtual machine, e.g. in Azure, and run the scripts that Github gave you in the previous step.
+3. Install dependencies on the machine you created.
+
+```bash
+# Update and Upgrade
+sudo apt-get update
+sudo apt-get upgrade -y
+
+# Add Kubernetes repository and key
+curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
+echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
+
+# Add Helm repository and key
+curl https://baltocdn.com/helm/signing.asc | sudo apt-key add -
+echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list
+
+# Update package list again after adding the Kubernetes and Helm repositories
+sudo apt-get update
+
+# Install software/packages
+sudo apt-get install -y apt-transport-https gnupg2 kubectl tmux python3-pip docker.io golang helm
+
+# Python symbolic link
+sudo ln -s /usr/bin/python3 /usr/bin/python
+
+# Docker post-installation step for the current user
+sudo usermod -aG docker $USER
+
+# Go and kind installation
+go install sigs.k8s.io/kind@v0.20.0
+sudo ln -s /home/datacoves/go/bin/kind /usr/local/bin/kind
+```
+4. Run `tmux` so the session is not closed when you detach from the ssh connection.
+5. Follow the instructions you got from Github in step 1 and install the runner as a service: `sudo ./svc.sh install datacoves`
+6. Boost inotify limits for system performance. Update the following values in the specified files:
+
+   ```
+   ~$ cat /proc/sys/fs/inotify/max_user_instances
+   1024
+   ~$ cat /proc/sys/fs/inotify/max_user_watches
+   524288
+   ~$ cat /proc/sys/fs/inotify/max_queued_events
+   16384
+   ```
\ No newline at end of file
diff --git a/docs/how-tos/release-notes.md b/docs/how-tos/release-notes.md
new file mode 100644
index 00000000..7abda1aa
--- /dev/null
+++ b/docs/how-tos/release-notes.md
@@ -0,0 +1,50 @@
+# Statement of Purpose
+
+The purpose of this document is to describe the process by which we manage release notes to deliver to our customers.
+
+# Source of Authority
+
+Release notes all come from Github:
+
+https://github.com/datacoves/datacoves/releases
+
+The notes begin life as auto-generated notes that are created when the release branch is built. Then, we hand-edit the release notes to match the following format:
+
+```
+Breaking Changes
+* Items that are breaking changes, in list.
+
+New Features
+* New features, in list.
+
+Enhancements
+* Enhancements to old features, in list
+
+Fixes
+* Bug fixes, in list
+
+Under the Hood
+* Notes relevant to us internally which we would like to keep, but not important to customers.
+
+**Full Changelog**: This is a URL that is provided automatically, just leave it in the change log.
+```
+
+# Generating Release Notes
+
+Release notes are generated per-customer and have all the changes from their current release to the latest release you currently have downloaded in your 'releases' folder. Make sure you have the customer's cluster configuration checked out into your 'config' directory; if you do not, stop and ask for help before continuing.
+
+You can control which release notes are generated; make sure you have downloaded the releases first:
+
+```
+./cli.py download_releases
+```
+
+If desired or necessary, you can delete files out of your 'releases' directory; for instance, if the customer is getting updated to the latest 2.2 series release but there are 2.3 series releases available, you could delete all the 2.3 release files out of your 'releases' directory and notes for those releases will not be produced.
+
+Release notes are then generated using `cli.py` as follows:
+
+```
+./cli.py combined_release_notes
+```
+
+It will make a file `combined.md` in the same directory as `cli.py`, and that will have the combined release notes for all the releases involved. This file can then be delivered to the customer as part of the announcement to upgrade them.
diff --git a/docs/how-tos/request-access-to-a-cloud-pc-on-kenvue.md b/docs/how-tos/request-access-to-a-cloud-pc-on-kenvue.md
new file mode 100644
index 00000000..1b0c577f
--- /dev/null
+++ b/docs/how-tos/request-access-to-a-cloud-pc-on-kenvue.md
@@ -0,0 +1,7 @@
+# How to request access to a cloud PC on Kenvue
+
+1. Navigate to this [form](https://kenvue.service-now.com/iris?id=sc_cat_item&sys_id=8a99c827ccc10200120f45e73abfd575&sysparm_category=cf7700cf4fce0fc4e2f944401310c7cd).
+
+2. Complete it accordingly:
+
+![Kenvue Cloud PC](img/cloud-pc.jpg)
\ No newline at end of file
diff --git a/docs/how-tos/reset-datahub.md b/docs/how-tos/reset-datahub.md
new file mode 100644
index 00000000..ee9087db
--- /dev/null
+++ b/docs/how-tos/reset-datahub.md
@@ -0,0 +1,48 @@
+## Resetting Datahub
+
+Datahub uses PostgreSQL, Elasticsearch, and Kafka. If any of these three things gets out of sync for any reason, Datahub will behave very strangely. For instance, it will claim secrets exist but not show them in the UI.
+
+In such an event, you will need to reset Datahub. This can be done with the following steps:
+
+In all these examples, replace **xxx** with the slug (such as dev123).
+
+### Turn Off Datahub
+
+Go to the environment you wish to reset, and disable Datahub. Save and sync the environment and wait until Datahub comes offline by monitoring the Datahub pods:
+
+```
+kubectl get pods -n dcw-xxx | grep datahub
+```
+
+This will take a while.
+
+### Delete Metadata in PostgreSQL
+
+```
+./cli.py pod_sh
+./manage.py dbshell
+\c xxx_dh
+drop table metadata_aspect_v2;
+```
+
+### Delete Persistent Volume Claims
+
+```
+kubectl delete pvc -n dcw-xxx elasticsearch-master-elasticsearch-master-0
+kubectl delete pvc -n dcw-xxx data-xxx-kafka-broker-0
+kubectl delete pvc -n dcw-xxx data-xxx-kafka-zookeeper-0
+```
+
+### Verify Persistent Volumes are deleted
+
+```
+kubectl get pv -n dcw-xxx | grep xxx | grep elasticsearch
+kubectl get pv -n dcw-xxx | grep xxx | grep kafka
+```
+
+These should show no results. The PVs should be deleted automatically when the PVCs are deleted; make sure they are gone.
+
+### Re-enable Datahub
+
+Go back to the environment, turn Datahub back on, and re-sync.
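+
+As when turning it off, you can watch the pods to confirm the components come back up (same command as above; replace **xxx** with the slug):
+
+```
+kubectl get pods -n dcw-xxx | grep datahub
+```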
+ diff --git a/docs/how-tos/security-vulnerabilities-fix.md b/docs/how-tos/security-vulnerabilities-fix.md new file mode 100644 index 00000000..c37afeeb --- /dev/null +++ b/docs/how-tos/security-vulnerabilities-fix.md @@ -0,0 +1,31 @@ +# How to run security vulnerabilities check and fix them + +## React app + +### Install + +```shell +yarn add yarn-audit-fix -D +``` + +### Run + +```shell +yarn-audit-fix +``` + +Learn more: https://yarnpkg.com/package?name=yarn-audit-fix + +## Django app + +### Install + +```shell +pip install pip-audit +``` + +### Run + +```shell +pip-audit -r ./requirements.txt +``` diff --git a/docs/how-tos/set-maintenance-mode.md b/docs/how-tos/set-maintenance-mode.md new file mode 100644 index 00000000..7b4a4dda --- /dev/null +++ b/docs/how-tos/set-maintenance-mode.md @@ -0,0 +1,13 @@ +# How to set the cluster in "Maintenance Mode" + +Turning it on: + +``` +./cli.py set_maintenance_mode "on" "today at 9PM UTC" "support@datacoves.com" "our Support Team" +``` + +Turning it off: + +``` +./cli.py set_maintenance_mode "off" +``` diff --git a/docs/how-tos/setup-oauth-on-azure.md b/docs/how-tos/setup-oauth-on-azure.md new file mode 100644 index 00000000..c0e5d939 --- /dev/null +++ b/docs/how-tos/setup-oauth-on-azure.md @@ -0,0 +1,56 @@ +# How to set up oAuth authentication on Azure + +> **NOTE:** This guide was based on this [Auth0 help page](https://auth0.com/docs/authenticate/identity-providers/enterprise-identity-providers/azure-active-directory/v2), it could require some adjustments. + +This is done using Azure AD / Entra ID apps. + +## Register new app + +1. Navigate to App registrations on Azure Portal + +![App registrations](img/app-registrations.png) + +2. Register a new App, choosing a name, selecting "Accounts in this organizational directory only (Datacoves Inc. only - Single tenant)" +and providing a redirect url in the form of "https://api.{cluster_domain}/complete/azuread-tenant-oauth2" + +![Register new app](img/register-new-app.png) + +3. Once created, get the client id and tenant id from the overview page + +![Client ID and Tenant Id](img/client-id-tentant-id.png) + +## Generate Client Secret + +Navigate to 'Certificates & Secrets' and Generate a new client secret + +![Client secret](img/client-secret.png) + +Keep the value safe. + +## Configure permissions + +Navigate to app permissions and then 'Add permissions'. Select 'Microsoft Graph', then 'Delegated permissions', and the following OpenId permissions. + +![App permissions](img/app-permissions.png) + +Also add permissions to read groups memberships if they're going to be used to determine permissions in Datacoves. + +![App permissions](img/app-permissions-group.png) + +Finally, consent as an Admin the permissions granted by clicking on this button: + +![Admin consent](img/admin-consent.png) + +## Configure token + +We need to include the groups claim in both the ID and access token, to do so, go to Token configuration: + +![Token configuration](img/token-configuration.png) + +Click on "Add groups claim", select "Security groups", make sure "Group ID" is selected in both ID and Access tokens and click on Add. + +![Add group claim](img/add-group-to-token.png) + +## Configure Datacoves + +Configure the Client ID, Tenant ID and Client Secret accordingly on Datacoves using the env variables AZUREAD_CLIENT_ID, AZUREAD_TENANT_ID, and AZUREAD_CLIENT_SECRET. 
\ No newline at end of file diff --git a/docs/how-tos/setup-s3-for-dbt-api.md b/docs/how-tos/setup-s3-for-dbt-api.md new file mode 100644 index 00000000..8cecafac --- /dev/null +++ b/docs/how-tos/setup-s3-for-dbt-api.md @@ -0,0 +1,48 @@ +# Create a S3 bucket for dbt api artifacts + +## Create bucket on AWS console + +- Create an S3 bucket. +- Choose a bucket name, we suggest using _dbt_api where could be `ensemble`, `ensembletest`, etc. +- Create an IAM user with a policy to access the bucket, like the one below, + replacing `{your_bucket_name}` with your bucket's name. +- Create an access key for the user. Share it with the Datacoves team. + +``` +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:PutObject", + "s3:GetObject", + "s3:GetObjectVersion", + "s3:DeleteObject", + "s3:DeleteObjectVersion" + ], + "Resource": "arn:aws:s3:::{your_bucket_name}/*" + }, + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket", + "s3:GetBucketLocation" + ], + "Resource": "arn:aws:s3:::{your_bucket_name}" + } + ] +} +``` + +## Configure Datacoves accordingly + +For the cluster being configured, set the following environment variables in the `core-dbt-api.env` file: + +``` +STORAGE_ADAPTER=s3 +S3_BUCKET_NAME=fill_in +S3_ACCESS_KEY=fill_in +S3_SECRET_ACCESS_KEY=fill_in +S3_REGION=fill_in +``` \ No newline at end of file diff --git a/docs/how-tos/testing-alerts.md b/docs/how-tos/testing-alerts.md new file mode 100644 index 00000000..1115d7bc --- /dev/null +++ b/docs/how-tos/testing-alerts.md @@ -0,0 +1,51 @@ +# How to create and test alerts + +## Stack + +- Alert Manager +- Loki Alert Ruler +- Grafana + +## Test Loki Alert + +1. Add the new alert on `scripts/data/loki-rules.yaml` file. +2. Install `Observability Stack`. +3. Force some logs. + +Example: + +```bash +# Option 1 +kubectl -n core exec -it api-75567b8958-7b7rx -- bash + +# Option 2 +./cli.py pod_sh + +./manage.py shell_plus +``` + +```python +import requests +import time + +payload = { + "streams": [ + { + "stream": { + "agent_hostname": "eventhandler", + "job": "test", + "namespace": "core" + }, + "values": [[ str(int(time.time() * 1e9)), "max node group size reached" ]] + } + ] +} + +requests.post( + url="http://loki-loki-distributed-gateway.prometheus.svc.cluster.local/loki/api/v1/push", + json=payload, + headers={"Content-Type": "application/json"} +) +``` + +4. Now you can see the alert on `Cluster Alerts` diff --git a/docs/how-tos/trigger-cloudx-pipeline-on-kenvue-cluster.md b/docs/how-tos/trigger-cloudx-pipeline-on-kenvue-cluster.md new file mode 100644 index 00000000..0448efbe --- /dev/null +++ b/docs/how-tos/trigger-cloudx-pipeline-on-kenvue-cluster.md @@ -0,0 +1,5 @@ +# How to trigger a cloudx pipeline manually after changing cluster.yml on a kenvue cluster + +1. Go to the bastion +2. Run the curl command you can find in 1Password named `Run cloudx pipelines using curl on Kenvue clusters` +3. Not that the `Branch` queryparam references the repo branch you changed. \ No newline at end of file diff --git a/docs/how-tos/update-kubernetes-and-datacoves.md b/docs/how-tos/update-kubernetes-and-datacoves.md new file mode 100644 index 00000000..44790f98 --- /dev/null +++ b/docs/how-tos/update-kubernetes-and-datacoves.md @@ -0,0 +1,327 @@ +# Statement of Purpose + +The purpose of this document is to describe common upgrade procedures for both updating Kubernetes and updating Datacoves on customer clusters. + + +# Updating Kubernetes + +The procedure varies for Azure vs. AWS. 
We generally prefer to use the web console to do the upgrade. + +## Gain Kubernetes command line access to the cluster + +Make sure you are set up for Kubernetes command line access. + + * For Orrum the instructions are here: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/orrum + +Access whatever VPN is necessary. Switch to the correct Kubernetes context: + +``` +kubectl config get-contexts +kubectl config use-context context-name +``` + +If you aren't set up to do this, stop now and get help. + +## Disable Sentry Alarms + +Sentry is going to complain very loudly about all this. + +Currently, it looks like there is no way to disable this without the Sentry Business Plan which we do not have. But if that ever changes, we'll update this section. *For now, there is nothing to do.* + +## Check and Prepare PDB's + +The Kubernetes PDBs can cause an upgrade to hang, as it will prevent a pod from shutting down to receive the update. Check the PDBs like this: + +``` +kubectl get pdb -A +``` + +You will get an output similar to: + +``` +NAMESPACE NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE +calico-system calico-typha N/A 1 1 273d +core api 1 N/A 0 232d +core beat 1 N/A 0 232d +core redis 1 N/A 0 232d +core workbench 1 N/A 0 232d +core worker 1 N/A 0 232d +dcw-dev123 dev123-airflow-scheduler-pdb N/A 1 1 26h +dcw-dev123 dev123-airflow-webserver-pdb N/A 1 1 26h +kube-system coredns-pdb 1 N/A 1 273d +kube-system konnectivity-agent 1 N/A 1 273d +kube-system metrics-server-pdb 1 N/A 1 273d +``` + +Note the core namespace clusters with ALLOWED DISRUPTIONS at 0. You will need to patch those so that they will allow a disruption, and then revert the patch when done. + +The following commands will allow for a disruption: + +``` +kubectl patch pdb -n core api -p '{"spec":{"minAvailable":0}}' +kubectl patch pdb -n core beat -p '{"spec":{"minAvailable":0}}' +kubectl patch pdb -n core redis -p '{"spec":{"minAvailable":0}}' +kubectl patch pdb -n core workbench -p '{"spec":{"minAvailable":0}}' +kubectl patch pdb -n core worker-long -p '{"spec":{"minAvailable":0}}' +kubectl patch pdb -n core worker-main -p '{"spec":{"minAvailable":0}}' +kubectl patch pdb -n core dbt-api -p '{"spec":{"minAvailable":0}}' +kubectl patch pdb -n prometheus cortex-tenant -p '{"spec":{"minAvailable":0}}' +``` + +You can apply this to any other PDBs that prevent disruptions. *Take note of all the PDBs that you altered in this fashion.* + +## Upgrade Kubernetes + +This varies based on the cloud provider. + +### On Azure + +Go to: + +https://portal.azure.com/#view/HubsExtension/BrowseResource/resourceType/Microsoft.ContainerService%2FmanagedClusters + +Make sure you are logged into the correct client account (check the upper right corner). + +Locate the cluster you want to work with. Often you will have to alter the default filters so that "Subscription equals all". + +Pick the cluster you are updating. If you are not sure which one, ask. + +On the overview screen that comes up by default, you will see "Kubernetes version" in the upper right area. Click the version number. + +It will show version details; click Upgrade Version. + + * Pick Automatic upgrade: Enabled with patch (recommended) + * Pick Kubernetes version: the version you wish to upgrade to + * Pick upgrade scope: Upgrade control plane + all node pools + * Click save + +The upgrade will start in a few moments. + +## Wait for it to come back + +The update can take quite awhile. 
Keep an eye on the pods and watch them update: + +``` +kubectl get pods -A +``` + +You will see a lot of activity, pods shutting down and restarting. Once it's all back online, you can restore the PDBs (see next step) and you can verify the update (see bottom of this file). + +## Restore PDB's + +We need to put the PDB's back in place. + +``` +kubectl get pdb -A +``` + +You will get an output similar to: + +``` +NAMESPACE NAME MIN AVAILABLE MAX UNAVAILABLE ALLOWED DISRUPTIONS AGE +calico-system calico-typha N/A 1 1 273d +core api 0 N/A 1 232d +core beat 0 N/A 1 232d +core redis 0 N/A 1 232d +core workbench 0 N/A 1 232d +core worker 0 N/A 1 232d +dcw-dev123 dev123-airflow-scheduler-pdb N/A 1 1 26h +dcw-dev123 dev123-airflow-webserver-pdb N/A 1 1 26h +kube-system coredns-pdb 1 N/A 1 273d +kube-system konnectivity-agent 1 N/A 1 273d +kube-system metrics-server-pdb 1 N/A 1 273d +``` + +The following commands will re-enable the PDBs: + +``` +kubectl patch pdb -n core api -p '{"spec":{"minAvailable":1}}' +kubectl patch pdb -n core beat -p '{"spec":{"minAvailable":1}}' +kubectl patch pdb -n core redis -p '{"spec":{"minAvailable":1}}' +kubectl patch pdb -n core workbench -p '{"spec":{"minAvailable":1}}' +kubectl patch pdb -n core worker-main -p '{"spec":{"minAvailable":1}}' +kubectl patch pdb -n core worker-long -p '{"spec":{"minAvailable":1}}' +kubectl patch pdb -n core dbt-api -p '{"spec":{"minAvailable":1}}' +kubectl patch pdb -n prometheus cortex-tenant -p '{"spec":{"minAvailable":1}}' +``` + +Also restore any additional PDBs you had to disable in the prior step. + +# Updating DataCoves + +Updating DataCoves is relatively simple. However, some of the access details can be compllicated. + +## First Time Setup: Set Up Deployment Environment and Get Needed Access + +J&J, Kenvue, and Orrum have some complexity around access. AKS access is relatively easy. These are one-time steps you need to take to get access to each environment. + +### AKS + +Accessing AKS is documented here: https://github.com/datacoves/datacoves/blob/main/docs/how-tos/administrate-east-us-a-aks-cluster.md + +Installation is done using your development system's checked out copy of the Datacoves repository. AKS' configuration repository is located at: https://github.com/datacoves/config-datacoves-east-us-a and should be checked out into your 'config' directory. + +### Orrum + +Accessing Orrum is documented here: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/orrum + +Installation is done using your development system's checked out copy of the Datacoves repository. Note that Orrum requires a VPN, but the access is described above. Orrum's configuration repository is here: https://github.com/datacoves/config-datacoves-orrum and must be checked out into your 'config' directory. + +### CCS + +To access CCS, your Datacoves account must be added to CCS' Azure organization. Eugine Kim can assist with this. + +Then, you must download and install the Azure VPN client. For Macs, this is done through the Apple Store. + +And finally, you need the Azure command line tools which you probably already have installed if you followed our README instructions for setting up this repository. You should also be logged into Azure with `az login`. 
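If the Azure CLI is not logged in yet, a minimal sketch of pointing it at the right subscription looks like this (the subscription ID is the same one used in the bastion command that follows):

```
az login

# Confirm which subscriptions you can see, then select the CCS one
az account list --output table
az account set --subscription 3099b8af-7ca1-4ff4-b9c5-1960d75beac7
```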
+ +Then, on the VPN, you can shell into the Bastion as follows: + +``` +az ssh vm --subscription 3099b8af-7ca1-4ff4-b9c5-1960d75beac7 --ip 10.0.2.4 +``` + +Once on the Bastion, the tools are installed with Linux Brew. Edit your `.bashrc` file in your home directory with your favorite editor and add this to the end: + +``` +eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv) +``` + +Log out and log back in. ```python3 --version``` should reveal a modern `3.1x` python version. + + +From this point, simply check out the datacoves repository and do the installation like any other system. + +### J&J / Kenvue + +J&J access is complex; going into the details of all the setup is out of the scope of this documentation. However, we will cover how to get set up on the bastion so you can get to work. + +It is a good idea to read this documentation if you haven't already: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/jnj + +In order to do deployments in J&J or Kenvue, you have to do the work from a bastion server, which is a Linux machine accessible via your Cloud PC. J&J and Kenvue have different bastions; however, configuring them is basically the same. + +The IP address for the J&J Bastion is: `10.157.82.138` and the IP address for the Kenvue bastion is: (... I am unable to log into Kenvue right now! Great!) + +I make a `.bat` file that runs `ssh IP` where the IP is the one above. + +Once you log into the bastion, there are a few things to note: + + - You can sudo to root thusly: `sudo su -`. Any other `sudo` command will not work, you can only `sudo su -`. + - The default home directory you log into on the bastion does not have much disk space, so we use a volume mount on `/app` for most of our work. + - We use `brew` to manage packages. + +To get set up initially, take the following steps: + +#### Copy base configuration + +```cp -R /app/users/datacoves-home-template/. ~/``` + +#### Add brew to your .bashrc + +Edit your `.bashrc` file in your home directory with your favorite editor and add this to the end: + +``` +eval $(/home/linuxbrew/.linuxbrew/bin/brew shellenv) +``` + +Log out and log back in. ```python3 --version``` should reveal a modern `3.1x` python version. + +#### Login to Kubernetes + +``` +kubectl config get-contexts +``` + +#### Set up your deployment repository + +``` +sudo su - +mkdir -p /app/users/$USER +chown -R $USER /app/users/$USER +exit +cd /app/users/$USER +git clone https://github.com/datacoves/datacoves.git +cd datacoves +python3 -m venv .venv +source .venv/bin/activate +pip3 install -r requirements.txt +``` + +#### Set up your configuration repository + +For each environment you will deploy to, you need to check out its config repository into your 'configs' directory. The list of repositories is here: + +https://github.com/datacoves/datacoves/blob/main/docs/client-docs/jnj/1-cluster-requirements.md + +## Before Deployment: Create your Plan + +Before a deployment is done, you must first check to see if there are any special installation steps. I use a Word document template, and I update it according to each release adding any special steps that I need to. Then I print it out and use it as a physical check list. My template file is [here](DeploymentTemplate.doc). + +First, look at the version of the cluster you will be updating. You can get this version from the cluster-params.yaml. The easiest way to do this is to check the difference between two versions in GitHub.
Here's an example of a comparison between two versions: + +https://github.com/datacoves/datacoves/compare/v3.2.202410250048...v3.2.202411140044 + +Look at all the pull requests that are in your new release and check to see if you have any that are labeled "special release step" and add any special steps to your release document. Post your finished work on the Slack dev channel for commentary. + +## Perform the installation + +Release documentation is here: https://www.notion.so/datacoves/Release-Instructions-1b5ea827f87280f98620dccc1600727c **Be very sure you are releasing from the correct release branch**. You need to release from the tag of the version you are deploying. You can check out a tag thusly: + +``` +git fetch -a +git checkout refs/tags/v1.2.34234523452524 +``` + +Replace the tag name with the version you are deploying. If you deploy from main or the wrong branch, you risk using installation scripts that are newer and have features that aren't supported yet by the images you are deploying. + +### How to run migrations on a stuck install process + +Sometimes migrations do not run automatically because the new pod containing the migrations fails before they can be applied. When this occurs we need to execute them manually. To do so, we remove the `LivenessProbe` and `ReadinessProbe`; this lets the new pod run correctly and allows us to enter it and execute the migrations ourselves. + +```shell +kubectl patch deployments -n core api -p '{"spec": {"template": {"spec": {"containers":[{"name": "api", "livenessProbe": null, "readinessProbe": null}]}}}}' +``` + +Once the pod is running correctly: + +```shell +kubectl -n core get pods +kubectl -n core exec -it api-<pod> -- bash +./manage.py migrate +``` + +### Create Profile Image Set for New Release + +This may be necessary if an error about Profile Image Sets occurs; it is a bit of a chicken and the egg problem, as the release needs to exist prior to creating the profile image set, but the release won't exist until the install process is attempted. + +Log into the customer's API panel. + + * Orrum's is: https://api.datacoves.orrum.com/panel + * CCS' is: https://api.datacoves.cssperfusion.com/panel + +Under "Projects" pick "Profile Image Sets". Go to the existing Profile Image Set for the old release, and copy / paste the 4 JSON blocks into an editor. Take a note of what is in the 'profile' field. + +Go back to the listing of Profile Image Sets and click `+ Add profile image set` in the corner. Make the profile the same as the previous release's, and choose the new release from the release select box. + +Then, paste the four JSON blocks into the new Profile Image Set. Check your release YAML file in `releases` and note the section 'code_server_libraries'; compare that to the Python libraries in the profile image set. Update versions as needed, but never downgrade. There's no need to add libraries that are in the release YAML but not in the profile image entry. + +Also check 'code_server_extensions' against 'code server extensions' and apply the same logic to update extensions that are in the Profile Image Set. + +Save the new profile image set. Then, keeping all the data from the old profile image set in case you need it, go back into the old one and delete it. + +You can now re-run installation and it should get past this error. + +# Verify Installation + +Verifying the installation is the same no matter what process you're engaging in with DataCoves clusters, be it a Kubernetes update or a DataCoves update.
+ + * Make sure no helm chart failed and retry if needed: `./cli.py retry_helm_charts` + * Log into the customer's API panel and make sure that is working. + * Log into the customer's launchpad and make sure that is working. + * Pick one of the customer's environments and make sure you can get into it. + * Try to use code server ("Transform") + * Open a terminal in code server and run `dbt-coves --version` + * Try to use Airflow ("Orchestrate") + * Look at logs in one of the DAGs + +If your user does not have permission to get into the customer's cluster, temporarily add yourself to the necessary groups to check the cluster. diff --git a/docs/how-tos/update-ssl-certificates.md b/docs/how-tos/update-ssl-certificates.md new file mode 100644 index 00000000..4c6096ea --- /dev/null +++ b/docs/how-tos/update-ssl-certificates.md @@ -0,0 +1,304 @@ +# Statement of Purpose + +The purpose of this document is to describe the process of upgrading SSL certificates for customers that are using custom certificates (i.e. not using Let's Encrypt). + +# Step 1: Prepare and Verify Certificate Files + +*This should be done soon after certificate files are received, and not last minute.* + +Ultimately, we need the following files: + + * root.cer + * root.secret.key + * wildcard.cer + * wildcard.secret.key + +The root.cer is the certificate for the root domain, i.e. datacoves.orrum.com + +wildcard.cer is a wildcard, i.e. *.datacoves.orrum.com + +All of these files should be in pem format; the cer files should have the complete keychain. A pem format looks like this: + +``` +-----BEGIN CERTIFICATE----- +MIIEjTCCAvWgAwIBAgIQQ71EG0d4110tqpc8I8ur/jANBgkqhkiG9w0BAQsFADCB +pzEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMT4wPAYDVQQLDDVzc2Fz +c2lAU2ViYXN0aWFucy1NYWNCb29rLVByby5sb2NhbCAoU2ViYXN0aWFuIFNhc3Np +.... +JbszQlyzkyzBxQ5eiK3OUNdsB+n5Zo+TshRRL45wA9fZmvAizzmtehxJWUbidGL7 +eqqMWqdt11MTLJ3feOjGlryMFO6TIt/aH/91VkoLyVhsemuk5LukZ1nIxoWvzHcf +y2cC+I3F8bWbYkRr92fmb8A= +-----END CERTIFICATE----- +``` + +There should be several BEGIN / END certificate blocks in wildcard.cer and root.cer file; the wildcard.csr and root.csr files should have a complete certificate stack and should be suspect if they only contain a single certificate block. + +The key files will have a slightly different header, looking like this: + +``` +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCLf9Q17CQlOWDB +CwWOuzL4+aalFwj2PR+OTuPnjHCI8stDedvmy5jtxSkdAL+5PgNu7ZJbKFhbODgT +... +OpuSfWnGVhOmii2aiYePtvNqDsLQv59MUxpUi8R6aw/XhG2Vb7t14+hbmUtRScUV +LcGdNBdJyB8NaHYR/sNF1w== +-----END PRIVATE KEY----- +``` + +*If you receive a pfx format file, we cover that in a section below. Read that section and go through those steps, then return to this section to complete verification.* + +You can verify the certs with the following commands: + +```shell +# Verify root +openssl crl2pkcs7 -nocrl -certfile root.cer | openssl pkcs7 -print_certs -noout -text + +# Verify wildcard +openssl crl2pkcs7 -nocrl -certfile wildcard.cer | openssl pkcs7 -print_certs -noout -text +``` + +And you will see several blocks with a Certificate header. One block should contain the host name for the certificate. 
In our example, datacoves.orrum.com: + +``` +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 01:cb:00:21:05:34:94:76:2b:f8:68:cf:8a:09:4c:02 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1 + Validity + Not Before: Apr 22 00:00:00 2024 GMT + Not After : Apr 21 23:59:59 2025 GMT + Subject: CN=datacoves.orrum.com +``` + +Note the hostname under 'Subject'; make sure that is the correct host. root will appear as above, as a single host name; wildcard should look like this instead: + +``` +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 0d:7f:e3:36:2c:db:b0:65:78:9a:c1:88:f8:06:12:4f + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1 + Validity + Not Before: Apr 22 00:00:00 2024 GMT + Not After : Apr 21 23:59:59 2025 GMT + Subject: CN=*.datacoves.orrum.com +``` + +Note the * symbol there in the subject. Also take note of the issuer; `CN=Thawte TLS RSA CA G1`. + +Elsewhere in the certificate output, you should see a certificate for the issuer, such as: + +``` +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 09:0e:e8:c5:de:5b:fa:62:d2:ae:2f:f7:09:7c:48:57 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2 + Validity + Not Before: Nov 2 12:24:25 2017 GMT + Not After : Nov 2 12:24:25 2027 GMT + Subject: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1 +``` + +Note the subject matches the issuer name. And finally, this certificate has an issuer as well; make sure that one is in the file. In this case, `DigiCert Global Root G2`. In our example, you can find it here: + +``` +Certificate: + Data: + Version: 3 (0x2) + Serial Number: + 03:3a:f1:e6:a7:11:a9:a0:bb:28:64:b1:1d:09:fa:e5 + Signature Algorithm: sha256WithRSAEncryption + Issuer: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2 + Validity + Not Before: Aug 1 12:00:00 2013 GMT + Not After : Jan 15 12:00:00 2038 GMT + Subject: C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2 +``` + +Note again the 'subject' line. Typically PEM files will have certificates in the following order: + + * Host's certificate + * One or More Intermediate + * Root certificate + +If you have to assemble a certificate from multiple parts, please be aware that this is the recommended ordering; however I don't think it will cause an error if you get the ordering wrong. + +Once your certificates are in order, you can verify the key with the following commands: + +``` +openssl rsa -check -noout -in wildcard.secret.key +openssl rsa -check -noout -in root.secret.key +``` + +Both should say: `RSA key is okay` + +Now compare the modulus of the key and the cert: + +``` +# These two should match +openssl rsa -modulus -noout -in wildcard.secret.key | openssl md5 +openssl x509 -modulus -noout -in wildcard.cer | openssl md5 + +# And these two should match +openssl rsa -modulus -noout -in root.secret.key | openssl md5 +openssl x509 -modulus -noout -in root.cer | openssl md5 +``` + +If the modulus doesn't match, it may be because the server certificate isn't the first certificate in the .cer file. Make sure the order is correct and try again. + +## Converting pfx format files + +We have received files in pfx format instead of pem and these require special handling. 
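Before converting, it can help to peek at what a pfx actually contains. This is an optional sketch; the `--legacy` flag and the "Import Password" prompt behave the same way as in the conversion commands below:

```shell
# Lists the certificates bundled in the pfx without writing anything out
openssl pkcs12 -in wildcard.pfx -info -nokeys --legacy
```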
To convert them to usable .cer and .key files, use the following commands: + +```shell +# Assuming we have files wildcard.pfx and root.pfx +# +# Note: The --legacy option seems to be needed for most people, however +# some are able to do this without --legacy ... you can try without +# it first if you want. +# +# You will be asked for an "Import Password" -- just hit enter to skip that +# If you get an error after the Import Password, you need --legacy + +openssl pkcs12 -in wildcard.pfx -cacerts -out wildcard_ca.cer -nodes -nokeys --legacy +openssl pkcs12 -in root.pfx -cacerts -out root_ca.cer -nodes -nokeys --legacy +``` + +Edit the wildcard_ca.cer and root_ca.cer files, and remove the header above `-----BEGIN CERTIFICATE-----`. This header will resemble this: + +``` +Bag Attributes: +subject=C=US, O=DigiCert Inc, OU=www.digicert.com, CN=Thawte TLS RSA CA G1 +issuer=C=US, O=DigiCert Inc, OU=www.digicert.com, CN=DigiCert Global Root G2 +``` + +*WARNING: Check the ENTIRE file, as there will probably be multiple headers. Any text not between `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----` must be removed!* + +Next, you need to extract the server certs, thusly: + +``` +# See notes above regarding --legacy and "Import Password" + +openssl pkcs12 -in wildcard.pfx -clcerts -nokeys -out wildcard.single.cer --legacy +openssl pkcs12 -in root.pfx -clcerts -nokeys -out root.single.cer --legacy +``` + +Once again, delete the header(s) above `-----BEGIN CERTIFICATE-----` in these files. Afterwards, run the following commands: + +``` +cat wildcard.single.cer wildcard_ca.cer > wildcard.cer +cat root.single.cer root_ca.cer > root.cer +``` + +Now we're going to generate the private keys. When generating the private keys, set a temporary password (just the word `password` is fine); we will remove the password in the subsequent step. + +``` +# See notes above regarding --legacy and "Import Password" +openssl pkcs12 -in wildcard.pfx -nocerts -out wildcard.secrets.withpass.key --legacy +openssl pkcs12 -in root.pfx -nocerts -out root.secrets.withpass.key --legacy +``` + +And finally, strip the passwords out for the final key files: + +``` +openssl rsa -in wildcard.secrets.withpass.key -out wildcard.secret.key +openssl rsa -in root.secrets.withpass.key -out root.secret.key +``` + +Now you have the files in PEM format, and you can go back to the section above to verify them. + +# Step 2: Update Cluster + +This step may vary from customer to customer, so see the appropriate subsection. + +## Orrum + +First, make sure you have the configuration repository checked out. In your `config` directory, clone it thusly: + +``` +git clone https://github.com/datacoves/config-datacoves-orrum.git datacoves.orrum.com +``` + +In the `datacoves.orrum.com` directory, reveal the secrets. If you run this command from a subdirectory, you'll get an error saying `core-api.env.secret` cannot be found. + +``` +git secret reveal -f +``` + +TODO: add instructions for setting up git secret + +Then in the `base` directory you will find `root.cer`, `root.secret.key`, `wildcard.cer`, and `wildcard.secret.key`. Replace these files with the new, verified files from step 1. + +Connect to the Orrum VPN. Instructions are here: https://github.com/datacoves/datacoves/tree/main/docs/client-docs/orrum + +Make sure you are in your Orrum context, whatever that is named: + +``` +# Use: +# kubectl config get-contexts +# To get context list if needed.
+kubectl config use-context orrum_new +``` + +Then run setup base. Return to the root directory of your git checkout to run `cli.py` thusly: + +``` +# Activate your venv first if necessary +./cli.py setup_base +``` + +After the cluster is updated (ingress will be updated), check the certificate: + +``` +curl https://api.datacoves.orrum.com -vI +``` + +This should output a bunch of information about the certificate, including: + +``` +* Server certificate: +* subject: CN=*.datacoves.orrum.com +* start date: Apr 8 07:33:48 2024 GMT +* expire date: Jul 1 07:33:47 2024 GMT +* subjectAltName: host "api.datacoves.orrum.com" matched cert's "*.datacoves.orrum.com" +* issuer: C=US; O=DigiCert Inc; OU=www.digicert.com; CN=Thawte TLS RSA CA G1 +* SSL certificate verify ok. +``` + +(The CN should be the correct host, and the expire date should be correct). + +Check the non-wildcard version as well: + +``` +curl https://datacoves.orrum.com -vI +``` + +Log into Orrum's launchpad and go into one of the environments to make sure pomerium doesn't have any issues; pomerium is particularly sensitive to certificate problems such as not having the full certificate chain in the root.cer / wildcard.cer files. + +If everything works alright, let's push the secrets. Be careful to not push up the key files as they will show up as "Untracked Files" in a `git status`. It is recommended you manually add the files thusly: + +``` +# Go back to the config directory +cd config/datacoves.orrum.com + +# See what files changed +git status + +# Add only the changed files, do NOT add the .key files or the original .pfx +git add .gitsecret/paths/mapping.cfg base/root.cer base/wildcard.cer secrets/core-api.env.secret secrets/docker-config.secret.json.secret secrets/rabbitmq.env.secert + +# You can also add any other safe file that you modified, just not those keys! + +git commit -m "Update certificates" +git push +``` + +And it should be done! diff --git a/docs/how-tos/upgrade-dbt-or-related-tools.md b/docs/how-tos/upgrade-dbt-or-related-tools.md new file mode 100644 index 00000000..a9d4f2f8 --- /dev/null +++ b/docs/how-tos/upgrade-dbt-or-related-tools.md @@ -0,0 +1,13 @@ +# How to upgrade dbt or related tools + +## dbt-coves + +- Pull Request on dbt-coves and merge. This will deploy a new pypi version + +## All libraries + +- Get current version of new libraries +- Upgrade code-server (src/code-server/code-server) docker image requirements.txt and labels +- Upgrade ci images libraries: ci/airflow and ci/basic, update labels. +- Upgrade airflow image libraries, install the new libraries in the environment targeted for dag runs, update labels accordingly. +- Run script that updates labels on docker files diff --git a/docs/how-tos/work-on-a-pre-release-locally.md b/docs/how-tos/work-on-a-pre-release-locally.md new file mode 100644 index 00000000..75630944 --- /dev/null +++ b/docs/how-tos/work-on-a-pre-release-locally.md @@ -0,0 +1,43 @@ +## Make and work on a pre-release locally + +Sometimes you need to change images and test them locally without affecting production releases. + +To do so: + +### Build the image you just changed + +```sh +./cli.py build_and_push # i.e. src/core/api +``` + +You'll need to specify the issue # + +This command will build and push a new image prefixing its name with the ticket number your provided. 
+ +### Generate the pre-release + +Once the image was pushed, you can create a new pre-release to try that image: + +```sh +./cli.py generate_release +``` + +This will create a new release file under /releases and will also be pushed to GitHub releases so other devs can reuse it. + +### Set the pre-release on datacoveslocal.com cluster + +```sh +./cli.py set_release +``` + +Select `datacoveslocal.com`. + +You might need to undo the file changes before pushing to PR branch. + +### Upgrade datacoves in local cluster + +```sh +./cli.py install +``` + +Select `datacoveslocal.com` \ No newline at end of file diff --git a/docs/img/admin-groups-selection.png b/docs/img/admin-groups-selection.png new file mode 100644 index 00000000..df9d5468 Binary files /dev/null and b/docs/img/admin-groups-selection.png differ diff --git a/docs/img/dbt-not-responding/dbt-not-responding.png b/docs/img/dbt-not-responding/dbt-not-responding.png new file mode 100644 index 00000000..95ad02e1 Binary files /dev/null and b/docs/img/dbt-not-responding/dbt-not-responding.png differ diff --git a/docs/img/dbt-not-responding/run-dbt-inside-python.png b/docs/img/dbt-not-responding/run-dbt-inside-python.png new file mode 100644 index 00000000..898a473d Binary files /dev/null and b/docs/img/dbt-not-responding/run-dbt-inside-python.png differ diff --git a/docs/img/ping_attribute_mappings.png b/docs/img/ping_attribute_mappings.png new file mode 100644 index 00000000..f5e0adc6 Binary files /dev/null and b/docs/img/ping_attribute_mappings.png differ diff --git a/docs/img/ping_settings.png b/docs/img/ping_settings.png new file mode 100644 index 00000000..3ac9ec55 Binary files /dev/null and b/docs/img/ping_settings.png differ diff --git a/docs/img/token-error.png b/docs/img/token-error.png new file mode 100644 index 00000000..996813d4 Binary files /dev/null and b/docs/img/token-error.png differ diff --git a/docs/implementation/README.md b/docs/implementation/README.md new file mode 100644 index 00000000..e69de29b diff --git a/docs/implementation/operator.md b/docs/implementation/operator.md new file mode 100644 index 00000000..18c3df9f --- /dev/null +++ b/docs/implementation/operator.md @@ -0,0 +1,134 @@ +# Operator documentation + +## Overview + +The datacoves _operator_ is a kubernetes [controller][], written in go, +scaffolded using [kubebuilder][]. It is responsible for setting up and +managing the kubernetes resources that make up a _workspace_ (a.k.a. an +_environment_). Each workspace has its own k8s namespace. The operator's source +code is in `src/core/operator/`. + +[controller]: https://kubernetes.io/docs/concepts/architecture/controller/ +[kubebuilder]: https://book.kubebuilder.io/ + + +The operator watches a few custom resources that specify what to set up. They +are defined in `api/v1/`. + +* `Workspace`: The main resource, fully describing a workspace. Parts of the +configuration are held in other resources, but the workspace references them all +and is the root of the configuration. Whenever a change to a model in the core +api database impacts a workspace configuration, the core-api's workspace.sync +task recomputes and (re-)writes the corresponding workspace k8s resource. The +operator detects the resource update and runs the reconciliation process to +apply any required changes to the kubernetes resources that compose the workspace. + +* `User`: Each workspace has a set of users, and each user gets certain resources, +such as a code-server deployment. 
+ +* `HelmRelease`: Most services set up by the operator are installed using helm. +A HelmRelease specifies that a helm chart should be installed, using a certain +version and helm values. + + +## Background + +Some useful background knowledge to have and resources to review: + +### Go + +* The [go spec](https://go.dev/ref/spec) is short, readable and precise. Use it. +* [Effective go](https://go.dev/doc/effective_go) and the [go FAQ](https://go.dev/doc/faq). +* Understanding go's concurrency constructs: CSP, goroutines and channels. +* Understanding that go (like C) is pass by value, so the distinction between + struct types and pointers to structs is often important. +* Understanding that errors are values in go. +* Understanding the [context](https://go.dev/blog/context) package. +* [How controller-runtime does logging](https://github.com/kubernetes-sigs/controller-runtime/blob/main/TMP-LOGGING.md). + +### Kubernetes + +* [API concepts](https://kubernetes.io/docs/reference/using-api/api-concepts/) +* [API conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md) +* [The kubebuilder book](https://book.kubebuilder.io/) +* Understand resourceVersion and generation. +* Understand ownerReferences and finalizers. + + +## Implementation: Reconcilers + +### Change detection and reconciliation + +The entry points to our code are the `Reconcile` methods for each resource, in +`controllers/*_controller.go`. The framework [watches][] kubernetes resources to +determine when to call `Reconcile`. The `SetupWithManager` method can be used +to influence when `Reconcile` should be called. + +[watches]: https://kubernetes.io/docs/reference/using-api/api-concepts/#efficient-detection-of-changes + +Reconciliation must be idempotent. If an error is returned, or there's a panic, +the framework will retry calling `Reconcile` repeatedly, less frequently each +time. + +To simplify change detection and ensure deployments are restarted when a secret +or configmap that affects them changes, we treat secrets and configmaps as +immutable values. We include a hash of their contents in their names. This means +that, to start using a new version, references to them must be updated. This implies +that resources using them will change too, which means all changes can be detected +by watching the resource that has the reference, without checking the contents +of the secret or configmap. + +### Applying changes to derived resources + +Reconciliation is conceptually stateless. We compute a set of derived resources +from the current value of the Workspace resource. We would like a primitive +that is the equivalent of `kubectl apply` in our go code. Unfortunately that +mechanism was not available for reuse when the operator was written, so we had +to build our own resource diffing. These are the `reconcile*` functions in +`controllers/reconcilers.go`. + +### Concurrency + +The framework runs `Reconcile` concurrently for different resource types. It also +runs the reconciliation for different resources concurrently, at most `MaxConcurrentReconciles` +at once. Reconciliation of multiple changes to a single resource happens serially. + +We take advantage of this fact to isolate failures. The Workspace reconciler +applies changes to HelmRelease and User resources. This way the reconciliation of +a HelmRelease or a User failing won't make the whole Workspace reconciliation fail.
+ + +## Implementation: Helm runner + +Before the `helm` module carried out the installation of helm charts by +starting helm subprocesses, we used to call into helm's go code directly from +the helmrelease controller. This caused two problems: + +* When the operator was restarted the helm release (stored by helm in a k8s secret) + could be left in a pending-upgrade state, which should only happen if helm is + still running. This is due to helm not cleaning up when interrupted. +* We ran out of memory, most likely due to a memory leak involving helm state. + +To address these issues we implemented the `helm` module, which schedules helm +subprocesses so that we can control their execution. It is a separate module +that runs a singleton scheduler process and receives requests to run helm over a +channel. The helmrelease_controller simply sends requests to this process +without waiting or checking results. + +Currently helm install failures will be logged but won't be retried. Manual +intervention is required in this case. In any case, retrying the whole helm +install is unlikely to succeed if nothing changed. Certain kinds of intermittent +failures could be detected and retried within an operation if desired. But in +this case, not retrying the helmrelease reconciliation as a whole is best, I think. + +The meat of the implementation is in the `run` function. It keeps track of +running and pending operations (and their potential memory usage) and spawns new +goroutines for each install/upgrade/uninstall operation. It is somewhat subtle +code. You should understand goroutines and channels well before touching it. + +When the operator is signaled by kubernetes to exit, we must be as gentle as +possible with helm subprocesses to avoid leaving the releases in a bad state. +There's a grace period between the first signal that the program will exit +and forceful termination. We use it to send SIGTERM to all the helm subprocesses, +which should allow them to exit more cleanly than if they were SIGKILLed. We +haven't seen any more charts left in `pending-upgrade` after this change. diff --git a/docs/index.html b/docs/index.html index c6463912..ae11f2de 100644 --- a/docs/index.html +++ b/docs/index.html @@ -1,15 +1,20 @@ - - - + + + @@ -19,8 +24,11 @@ /> Datacoves Docs - - + + - +
@@ -134,7 +139,6 @@ - diff --git a/docs/issues-resolutions/airflow-corrupted-dag-logs.md b/docs/issues-resolutions/airflow-corrupted-dag-logs.md new file mode 100644 index 00000000..7f234c8e --- /dev/null +++ b/docs/issues-resolutions/airflow-corrupted-dag-logs.md @@ -0,0 +1,74 @@ +## DAG logs were serialized with a newer version of pickle than the installed on Airflow webserver + +### Logs + +``` +Traceback (most recent call last): + File "/home/airflow/.local/bin/airflow", line 8, in + sys.exit(main()) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/__main__.py", line 38, in main + args.func(args) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/cli_parser.py", line 51, in command + return func(*args, **kwargs) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/cli.py", line 99, in wrapper + return f(*args, **kwargs) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/commands/scheduler_command.py", line 75, in scheduler + _run_scheduler_job(args=args) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/cli/commands/scheduler_command.py", line 46, in _run_scheduler_job + job.run() + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/base_job.py", line 244, in run + self._execute() + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 739, in _execute + self._run_scheduler_loop() + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 827, in _run_scheduler_loop + num_queued_tis = self._do_scheduling(session) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 909, in _do_scheduling + callback_to_run = self._schedule_dag_run(dag_run, session) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/jobs/scheduler_job.py", line 1151, in _schedule_dag_run + schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper + return func(*args, **kwargs) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 522, in update_state + info = self.task_instance_scheduling_decisions(session) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper + return func(*args, **kwargs) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 640, in task_instance_scheduling_decisions + tis = list(self.get_task_instances(session=session, state=State.task_states)) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/utils/session.py", line 68, in wrapper + return func(*args, **kwargs) + File "/home/airflow/.local/lib/python3.7/site-packages/airflow/models/dagrun.py", line 441, in get_task_instances + return tis.all() + File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/query.py", line 2683, in all + return self._iter().all() + File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 1335, in all + return self._allrows() + File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 408, in _allrows + rows = self._fetchall_impl() + File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 1243, in _fetchall_impl + return self._real_result._fetchall_impl() + File 
"/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 1636, in _fetchall_impl + return list(self.iterator) + File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/orm/loading.py", line 120, in chunks + fetch = cursor._raw_all_rows() + File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 400, in _raw_all_rows + return [make_row(row) for row in rows] + File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/engine/result.py", line 400, in + return [make_row(row) for row in rows] + File "/home/airflow/.local/lib/python3.7/site-packages/sqlalchemy/sql/sqltypes.py", line 1816, in process + return loads(value) + File "/home/airflow/.local/lib/python3.7/site-packages/dill/_dill.py", line 275, in loads + return load(file, ignore, **kwds) + File "/home/airflow/.local/lib/python3.7/site-packages/dill/_dill.py", line 270, in load + return Unpickler(file, ignore=ignore, **kwds).load() + File "/home/airflow/.local/lib/python3.7/site-packages/dill/_dill.py", line 472, in load + obj = StockUnpickler.load(self) +ValueError: unsupported pickle protocol: 5 +``` + +### Solution + +Connect to scheduler or triggerer pod and then remove DAG by running: + +```sh +airflow dags delete +``` diff --git a/docs/issues-resolutions/dbt-core-debugging.md b/docs/issues-resolutions/dbt-core-debugging.md new file mode 100644 index 00000000..f8e5c9c3 --- /dev/null +++ b/docs/issues-resolutions/dbt-core-debugging.md @@ -0,0 +1,24 @@ +## Python dbt-core debugging + +### Context: dbt does not respond to any of it's commands + +Due to changes in environment variable handling on dbt-core side, a read-only `$DBT_PROJECT_DIR` led to dbt not responding to anything but the `--version` call. + +![dbt-not-responding](../img/dbt-not-responding/dbt-not-responding.png) + +All dbt commands returned [exit code 2](https://docs.getdbt.com/reference/exit-codes) + +``` +2 The dbt invocation completed with an unhandled error (eg. ctrl-c, network interruption, etc). +``` + +### Solution + +Using dbt-core python library and it's `dbtRunner` gives us the possibility to receive that _"unhandled error"_ + +```python +>>> from dbt.cli.main import dbtRunner +>>> dbt_cli = dbtRunner() +>>> dbt_cli.invoke(["ls"]) +dbtRunnerResult(success=False, exception=OSError(30, 'Read-only file system'), result=None) +``` diff --git a/docs/issues-resolutions/docker-image-debugging.md b/docs/issues-resolutions/docker-image-debugging.md new file mode 100644 index 00000000..0b2be6b8 --- /dev/null +++ b/docs/issues-resolutions/docker-image-debugging.md @@ -0,0 +1,28 @@ +# Debugging images outside Datacoves. + +Sometimes we need to review images that are running in Datacoves in a simpler way to debug processes, review the versions of libraries, versions of pipelines, etc. + +1. Create `compose.yaml` or `docker-compose.yaml` file + +```sh +version: '3' + +services: + snowflake: + image: "taqy-docker.artifactrepo.jnj.com/datacoves/ci-basic-dbt-snowflake:3.1" + command: bash -c "sleep infinity" +``` + +2. Run commands + +```sh +docker compose run --rm snowflake bash -c "pip show dbt-core dbt-snowflake" +``` + +3. 
Get a terminal + +```sh +docker compose up -d +docker ps +docker exec -ti <container_id> /bin/bash +``` diff --git a/docs/issues-resolutions/docker-push-stopped-working.md b/docs/issues-resolutions/docker-push-stopped-working.md new file mode 100644 index 00000000..fa7df67d --- /dev/null +++ b/docs/issues-resolutions/docker-push-stopped-working.md @@ -0,0 +1,22 @@ +# Reset docker config authentication + +If docker push stopped working, you might need to log out and log back in after a password reset: + +``` +docker logout +``` + +Then, remove the entry for taqy-docker.artifactrepo.jnj.com in `~/.docker/config.json`. + +Finally, login again: + +``` +docker login taqy-docker.artifactrepo.jnj.com +``` + +# Unlock your artifactory account + +Sometimes your account can get blocked and you need to unlock it. + +1. Go to [appdevtools](https://appdevtools.jnj.com) +2. Under support, user access, click on `Unlock Artifactory Account`. diff --git a/docs/issues-resolutions/helm-chart.md b/docs/issues-resolutions/helm-chart.md new file mode 100644 index 00000000..6de6dc29 --- /dev/null +++ b/docs/issues-resolutions/helm-chart.md @@ -0,0 +1,45 @@ +# Helm Chart Resolutions + +## How to patch releases? + +Sometimes we want to change a value in the `Helm Chart`, but doing so normally means editing some component such as an **adapter** or the **Operator** and generating a new release. Patching the release directly lets us skip that whole process and run our tests more quickly. + +### Option No.1 + +1. Get the values from the release. + +```sh +# helm get values <release_name> -n <namespace> +helm get values dev123-datahub -n dcw-dev123 > values.yaml +``` + +2. Edit/add the values to the file. + +```sh +vi values.yaml +``` + +3. Add the repository if it does not exist. + +```sh +# helm repo add <repo_name> <repo_url> +helm repo add datahub https://helm.datahubproject.io/ +``` + +4. Patch the helm chart. + +```sh +# helm upgrade --version <version> -f values.yaml <release_name> <chart> -n <namespace> +helm upgrade --version 0.4.16 -f values.yaml dev123-datahub datahub/datahub -n dcw-dev123 +``` + +### Option No.2 + +1. Patch the helm chart.
+ +```sh +# helm upgrade <release_name> <chart> -n <namespace> --set key1=value1,key2=value2 +helm upgrade dev123-datahub datahub/datahub -n dcw-dev123 --set key1=value1,key2=value2 +``` + +[More info](https://www.baeldung.com/ops/kubernetes-update-helm-values) \ No newline at end of file diff --git a/docs/issues-resolutions/pomerium-not-allowing-access.md b/docs/issues-resolutions/pomerium-not-allowing-access.md new file mode 100644 index 00000000..de18de9b --- /dev/null +++ b/docs/issues-resolutions/pomerium-not-allowing-access.md @@ -0,0 +1,19 @@ +# Pomerium does not allow access to environments + +## Problem + +Launchpad works OK, but Pomerium returns timeouts; logs like these are found: + +``` +{"level":"info","X-Forwarded-For":["10.255.255.2,10.10.0.8"],"X-Forwarded-Host":["authenticate-dev123.orrum.datacoves.com"],"X-Forwarded-Port":["443"],"X-Forwarded-Proto":["http"],"X-Real-Ip":["10.255.255.2"],"ip":"127.0.0.1","user_agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36","request-id":"834a4284-9d39-474a-abb5-cd7203755386","error":"Bad Request: internal/sessions: session is not found","time":"2023-08-17T13:13:39Z","message":"authenticate: session load error"} +{"level":"info","service":"envoy","upstream-cluster":"pomerium-control-plane-http","method":"GET","authority":"authenticate-dev123.orrum.datacoves.com","path":"/.pomerium","user-agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36","referer":"","forwarded-for":"10.255.255.2,10.10.0.8","request-id":"834a4284-9d39-474a-abb5-cd7203755386","duration":15000.251354,"size":24,"response-code":504,"response-code-details":"upstream_response_timeout","time":"2023-08-17T13:13:55Z","message":"http-request"} +``` + +## Cause + +This is a DNS resolution issue that pomerium is having. Typically this happens when the cluster model has wrong values on `internal_ip` or `external_ip`. +This could have happened when the DB was copied to a different cluster, or the cluster changed its IPs. + +## Solution + +Remove the values on those 2 fields and save the cluster model again. On `save`, it will regenerate those IPs and Pomerium will be reinstalled. \ No newline at end of file diff --git a/indexer.js b/indexer.js index 9accda83..c79c2808 100644 --- a/indexer.js +++ b/indexer.js @@ -32,20 +32,20 @@ const documents = []; ;(async () => { for await (const filename of getHtmlFiles(input_path)) { - const content = (await readFile(filename)).toString(); - - // We just want a chunk of this content + const content = (await readFile(filename)).toString(); const slice_content = content.split('
'); + if (slice_content.length < 2) { + continue; + } const slice_again = slice_content[1].split('
'); - const body = slice_again[0].replace(/<[^>]*>?/gm, '').trim() - + const body = slice_again[0].replace(/<[^>]*>?/gm, '').trim(); + let url = "/" + relative(input_path, filename); - - // Figure out the URL + if (url.endsWith('index.html')) { url = url.substr(0, url.length - 10); } - + documents.push({ id: relative(input_path, filename), text: body, diff --git a/infra/README.md b/infra/README.md new file mode 100644 index 00000000..11673d82 --- /dev/null +++ b/infra/README.md @@ -0,0 +1,63 @@ +# Infra notes + +## Create service user + +This is only if you are creating a brand new environment. + +``` +az ad sp create-for-rbac --name DatacovesAutomatedDeployment --role Contributor --scopes /subscriptions/91bd2205-0d74-42c9-86ad-41cca1b4822b +``` + +## Set up Azure credentials + +This should be made by the secret reveal probably. + +``` +mkdir ~/.azure +vi ~/.azure/credentials +``` + +Put in the file (comes from az command above): + +``` +[default] +subscription_id=xxx +client_id=xxx +secret=xxx +tenant=xxx +``` + +## Install requirements + +``` +pip install ansible==2.18.2 +ansible-galaxy collection install azure.azcollection --force +pip3 install -r ~/.ansible/collections/ansible_collections/azure/azcollection/requirements.txt +``` + +## Environment variables used + +``` +# REQUIRED for AZ deployment: (these come from the az command above) +export DC_AZ_SERVICE_CLIENT_ID=xxx +export DC_AZ_SERVICE_CLIENT_SECRET=xxx + +OPTIONAL for AZ deployment: +export DC_AZ_AKS_NAME=datacoves-test +export DC_AZ_RESOURCE_GROUP=DatacovesTesting +export DC_AZ_LOCATION=eastus +export DC_AZ_KUBERNETES_VERSION=1.31.5 + + +OPTIONAL for DC deployment: +export DC_RELEASE=3.3.202502202042 or latest +export OP_SERVICE_ACCOUNT_TOKEN=... +export DC_SKIP_REVEAL_SECRETS=1 +export DC_HOSTNAME=datacoves-test.datacoves.com +export DC_KUBECTL_CONTEXT=... + + +export DC_SENTRY_DSN_OPERATOR="https://b4d54fe4d14746729baa351a2d3bf4f9@o1145668.ingest.sentry.io/4504730556170240" +export DC_SENTRY_DSN="https://5d7d4b6b765d41a295ba80e70d685cf2@o1145668.ingest.sentry.io/6213267" +export DC_SLACK_TOKEN="xxx" +``` diff --git a/infra/ansible.cfg b/infra/ansible.cfg new file mode 100644 index 00000000..266f7c93 --- /dev/null +++ b/infra/ansible.cfg @@ -0,0 +1,712 @@ +[defaults] +# (boolean) By default, Ansible will issue a warning when received from a task action (module or action plugin). +# These warnings can be silenced by adjusting this setting to False. +;action_warnings=True + +# (list) Accept a list of cowsay templates that are 'safe' to use, set to an empty list if you want to enable all installed templates. +;cowsay_enabled_stencils=bud-frogs, bunny, cheese, daemon, default, dragon, elephant-in-snake, elephant, eyes, hellokitty, kitty, luke-koala, meow, milk, moofasa, moose, ren, sheep, small, stegosaurus, stimpy, supermilker, three-eyes, turkey, turtle, tux, udder, vader-koala, vader, www + +# (string) Specify a custom cowsay path or swap in your cowsay implementation of choice. +;cowpath= + +# (string) This allows you to choose a specific cowsay stencil for the banners or use 'random' to cycle through them. +;cow_selection=default + +# (boolean) This option forces color mode even when running without a TTY or the "nocolor" setting is True. +;force_color=False + +# (path) The default root path for Ansible config files on the controller. +;home=~/.ansible + +# (boolean) This setting allows suppressing colorizing output, which is used to give a better indication of failure and status information. 
+;nocolor=False + +# (boolean) If you have cowsay installed but want to avoid the 'cows' (why????), use this. +;nocows=False + +# (boolean) Sets the default value for the any_errors_fatal keyword, if True, Task failures will be considered fatal errors. +;any_errors_fatal=False + +# (path) The password file to use for the become plugin. ``--become-password-file``. +# If executable, it will be run and the resulting stdout will be used as the password. +;become_password_file= + +# (pathspec) Colon-separated paths in which Ansible will search for Become Plugins. +;become_plugins=/home/sconley/.ansible/plugins/become:/usr/share/ansible/plugins/become + +# (string) Chooses which cache plugin to use, the default 'memory' is ephemeral. +;fact_caching=memory + +# (string) Defines connection or path information for the cache plugin. +;fact_caching_connection= + +# (string) Prefix to use for cache plugin files/tables. +;fact_caching_prefix=ansible_facts + +# (integer) Expiration timeout for the cache plugin data. +;fact_caching_timeout=86400 + +# (list) List of enabled callbacks, not all callbacks need enabling, but many of those shipped with Ansible do as we don't want them activated by default. +;callbacks_enabled= + +# (string) When a collection is loaded that does not support the running Ansible version (with the collection metadata key `requires_ansible`). +;collections_on_ansible_version_mismatch=warning + +# (pathspec) Colon-separated paths in which Ansible will search for collections content. Collections must be in nested *subdirectories*, not directly in these directories. For example, if ``COLLECTIONS_PATHS`` includes ``'{{ ANSIBLE_HOME ~ "/collections" }}'``, and you want to add ``my.collection`` to that directory, it must be saved as ``'{{ ANSIBLE_HOME} ~ "/collections/ansible_collections/my/collection" }}'``. + +;collections_path=/home/sconley/.ansible/collections:/usr/share/ansible/collections + +# (boolean) A boolean to enable or disable scanning the sys.path for installed collections. +;collections_scan_sys_path=True + +# (path) The password file to use for the connection plugin. ``--connection-password-file``. +;connection_password_file= + +# (pathspec) Colon-separated paths in which Ansible will search for Action Plugins. +;action_plugins=/home/sconley/.ansible/plugins/action:/usr/share/ansible/plugins/action + +# (boolean) When enabled, this option allows lookup plugins (whether used in variables as ``{{lookup('foo')}}`` or as a loop as with_foo) to return data that is not marked 'unsafe'. +# By default, such data is marked as unsafe to prevent the templating engine from evaluating any jinja2 templating language, as this could represent a security risk. This option is provided to allow for backward compatibility, however, users should first consider adding allow_unsafe=True to any lookups that may be expected to contain data that may be run through the templating engine late. +;allow_unsafe_lookups=False + +# (boolean) This controls whether an Ansible playbook should prompt for a login password. If using SSH keys for authentication, you probably do not need to change this setting. +;ask_pass=False + +# (boolean) This controls whether an Ansible playbook should prompt for a vault password. +;ask_vault_pass=False + +# (pathspec) Colon-separated paths in which Ansible will search for Cache Plugins. +;cache_plugins=/home/sconley/.ansible/plugins/cache:/usr/share/ansible/plugins/cache + +# (pathspec) Colon-separated paths in which Ansible will search for Callback Plugins. 
+;callback_plugins=/home/sconley/.ansible/plugins/callback:/usr/share/ansible/plugins/callback + +# (pathspec) Colon-separated paths in which Ansible will search for Cliconf Plugins. +;cliconf_plugins=/home/sconley/.ansible/plugins/cliconf:/usr/share/ansible/plugins/cliconf + +# (pathspec) Colon-separated paths in which Ansible will search for Connection Plugins. +;connection_plugins=/home/sconley/.ansible/plugins/connection:/usr/share/ansible/plugins/connection + +# (boolean) Toggles debug output in Ansible. This is *very* verbose and can hinder multiprocessing. Debug output can also include secret information despite no_log settings being enabled, which means debug mode should not be used in production. +;debug=False + +# (string) This indicates the command to use to spawn a shell under, which is required for Ansible's execution needs on a target. Users may need to change this in rare instances when shell usage is constrained, but in most cases, it may be left as is. +;executable=/bin/sh + +# (pathspec) Colon-separated paths in which Ansible will search for Jinja2 Filter Plugins. +;filter_plugins=/home/sconley/.ansible/plugins/filter:/usr/share/ansible/plugins/filter + +# (boolean) This option controls if notified handlers run on a host even if a failure occurs on that host. +# When false, the handlers will not run if a failure has occurred on a host. +# This can also be set per play or on the command line. See Handlers and Failure for more details. +;force_handlers=False + +# (integer) Maximum number of forks Ansible will use to execute tasks on target hosts. +;forks=5 + +# (string) This setting controls the default policy of fact gathering (facts discovered about remote systems). +# This option can be useful for those wishing to save fact gathering time. Both 'smart' and 'explicit' will use the cache plugin. +;gathering=implicit + +# (string) This setting controls how duplicate definitions of dictionary variables (aka hash, map, associative array) are handled in Ansible. +# This does not affect variables whose values are scalars (integers, strings) or arrays. +# **WARNING**, changing this setting is not recommended as this is fragile and makes your content (plays, roles, collections) nonportable, leading to continual confusion and misuse. Don't change this setting unless you think you have an absolute need for it. +# We recommend avoiding reusing variable names and relying on the ``combine`` filter and ``vars`` and ``varnames`` lookups to create merged versions of the individual variables. In our experience, this is rarely needed and is a sign that too much complexity has been introduced into the data structures and plays. +# For some uses you can also look into custom vars_plugins to merge on input, even substituting the default ``host_group_vars`` that is in charge of parsing the ``host_vars/`` and ``group_vars/`` directories. Most users of this setting are only interested in inventory scope, but the setting itself affects all sources and makes debugging even harder. +# All playbooks and roles in the official examples repos assume the default for this setting. +# Changing the setting to ``merge`` applies across variable sources, but many sources will internally still overwrite the variables. For example ``include_vars`` will dedupe variables internally before updating Ansible, with 'last defined' overwriting previous definitions in same file. 
+# The Ansible project recommends you **avoid ``merge`` for new projects.** +# It is the intention of the Ansible developers to eventually deprecate and remove this setting, but it is being kept as some users do heavily rely on it. New projects should **avoid 'merge'**. +;hash_behaviour=replace + +# (pathlist) Comma-separated list of Ansible inventory sources +inventory=./variables.yml + +# (pathspec) Colon-separated paths in which Ansible will search for HttpApi Plugins. +;httpapi_plugins=/home/sconley/.ansible/plugins/httpapi:/usr/share/ansible/plugins/httpapi + +# (float) This sets the interval (in seconds) of Ansible internal processes polling each other. Lower values improve performance with large playbooks at the expense of extra CPU load. Higher values are more suitable for Ansible usage in automation scenarios when UI responsiveness is not required but CPU usage might be a concern. +# The default corresponds to the value hardcoded in Ansible <= 2.1 +;internal_poll_interval=0.001 + +# (pathspec) Colon-separated paths in which Ansible will search for Inventory Plugins. +;inventory_plugins=/home/sconley/.ansible/plugins/inventory:/usr/share/ansible/plugins/inventory + +# (string) This is a developer-specific feature that allows enabling additional Jinja2 extensions. +# See the Jinja2 documentation for details. If you do not know what these do, you probably don't need to change this setting :) +;jinja2_extensions=[] + +# (boolean) This option preserves variable types during template operations. +;jinja2_native=False + +# (boolean) Enables/disables the cleaning up of the temporary files Ansible used to execute the tasks on the remote. +# If this option is enabled it will disable ``ANSIBLE_PIPELINING``. +;keep_remote_files=False + +# (boolean) Controls whether callback plugins are loaded when running /usr/bin/ansible. This may be used to log activity from the command line, send notifications, and so on. Callback plugins are always loaded for ``ansible-playbook``. +;bin_ansible_callbacks=False + +# (tmppath) Temporary directory for Ansible to use on the controller. +;local_tmp=/home/sconley/.ansible/tmp + +# (list) List of logger names to filter out of the log file. +;log_filter= + +# (path) File to which Ansible will log on the controller. +# When not set the logging is disabled. +;log_path= + +# (pathspec) Colon-separated paths in which Ansible will search for Lookup Plugins. +;lookup_plugins=/home/sconley/.ansible/plugins/lookup:/usr/share/ansible/plugins/lookup + +# (string) Sets the macro for the 'ansible_managed' variable available for :ref:`ansible_collections.ansible.builtin.template_module` and :ref:`ansible_collections.ansible.windows.win_template_module`. This is only relevant to those two modules. +;ansible_managed=Ansible managed + +# (string) This sets the default arguments to pass to the ``ansible`` adhoc binary if no ``-a`` is specified. +;module_args= + +# (string) Compression scheme to use when transferring Python modules to the target. +;module_compression=ZIP_DEFLATED + +# (string) Module to use with the ``ansible`` AdHoc command, if none is specified via ``-m``. +;module_name=command + +# (pathspec) Colon-separated paths in which Ansible will search for Modules. +;library=/home/sconley/.ansible/plugins/modules:/usr/share/ansible/plugins/modules + +# (pathspec) Colon-separated paths in which Ansible will search for Module utils files, which are shared by modules. 
+;module_utils=/home/sconley/.ansible/plugins/module_utils:/usr/share/ansible/plugins/module_utils
+
+# (pathspec) Colon-separated paths in which Ansible will search for Netconf Plugins.
+;netconf_plugins=/home/sconley/.ansible/plugins/netconf:/usr/share/ansible/plugins/netconf
+
+# (boolean) Toggle Ansible's display and logging of task details, mainly used to avoid security disclosures.
+;no_log=False
+
+# (boolean) Toggle Ansible logging to syslog on the target when it executes tasks. On Windows hosts, this will disable newer style PowerShell modules from writing to the event log.
+;no_target_syslog=False
+
+# (raw) What templating should return as a 'null' value. When not set it will let Jinja2 decide.
+;null_representation=
+
+# (integer) For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how often to check back on the status of those tasks when an explicit poll interval is not supplied. The default is a reasonably moderate 15 seconds which is a tradeoff between checking in frequently and providing a quick turnaround when something may have completed.
+;poll_interval=15
+
+# (path) Option for connections using a certificate or key file to authenticate, rather than an agent or passwords, you can set the default value here to avoid re-specifying ``--private-key`` with every invocation.
+;private_key_file=
+
+# (boolean) By default, imported roles publish their variables to the play and other roles, this setting can avoid that.
+# This was introduced as a way to reset role variables to default values if a role is used more than once in a playbook.
+# Starting in version '2.17' M(ansible.builtin.include_roles) and M(ansible.builtin.import_roles) can individually override this via the C(public) parameter.
+# Included roles only make their variables public at execution, unlike imported roles which happen at playbook compile time.
+;private_role_vars=False
+
+# (integer) Port to use in remote connections, when blank it will use the connection plugin default.
+;remote_port=
+
+# (string) Sets the login user for the target machines
+# When blank it uses the connection plugin's default, normally the user currently executing Ansible.
+;remote_user=
+
+# (pathspec) Colon-separated paths in which Ansible will search for Roles.
+;roles_path=/home/sconley/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
+
+# (string) Set the main callback used to display Ansible output. You can only have one at a time.
+# You can have many other callbacks, but just one can be in charge of stdout.
+# See :ref:`callback_plugins` for a list of available options.
+;stdout_callback=default
+
+# (string) Set the default strategy used for plays.
+;strategy=linear
+
+# (pathspec) Colon-separated paths in which Ansible will search for Strategy Plugins.
+;strategy_plugins=/home/sconley/.ansible/plugins/strategy:/usr/share/ansible/plugins/strategy
+
+# (boolean) Toggle the use of "su" for tasks.
+;su=False
+
+# (string) Syslog facility to use when Ansible logs to the remote target.
+;syslog_facility=LOG_USER
+
+# (pathspec) Colon-separated paths in which Ansible will search for Terminal Plugins.
+;terminal_plugins=/home/sconley/.ansible/plugins/terminal:/usr/share/ansible/plugins/terminal
+
+# (pathspec) Colon-separated paths in which Ansible will search for Jinja2 Test Plugins.
+;test_plugins=/home/sconley/.ansible/plugins/test:/usr/share/ansible/plugins/test
+
+# (integer) This is the default timeout for connection plugins to use.
+;timeout=10 + +# (string) Can be any connection plugin available to your ansible installation. +# There is also a (DEPRECATED) special 'smart' option, that will toggle between 'ssh' and 'paramiko' depending on controller OS and ssh versions. +;transport=ssh + +# (boolean) When True, this causes ansible templating to fail steps that reference variable names that are likely typoed. +# Otherwise, any '{{ template_expression }}' that contains undefined variables will be rendered in a template or ansible action line exactly as written. +;error_on_undefined_vars=True + +# (pathspec) Colon-separated paths in which Ansible will search for Vars Plugins. +;vars_plugins=/home/sconley/.ansible/plugins/vars:/usr/share/ansible/plugins/vars + +# (string) The vault_id to use for encrypting by default. If multiple vault_ids are provided, this specifies which to use for encryption. The ``--encrypt-vault-id`` CLI option overrides the configured value. +;vault_encrypt_identity= + +# (string) The label to use for the default vault id label in cases where a vault id label is not provided. +;vault_identity=default + +# (list) A list of vault-ids to use by default. Equivalent to multiple ``--vault-id`` args. Vault-ids are tried in order. +;vault_identity_list= + +# (string) If true, decrypting vaults with a vault id will only try the password from the matching vault-id. +;vault_id_match=False + +# (path) The vault password file to use. Equivalent to ``--vault-password-file`` or ``--vault-id``. +# If executable, it will be run and the resulting stdout will be used as the password. +;vault_password_file= + +# (integer) Sets the default verbosity, equivalent to the number of ``-v`` passed in the command line. +;verbosity=0 + +# (boolean) Toggle to control the showing of deprecation warnings +;deprecation_warnings=True + +# (boolean) Toggle to control showing warnings related to running devel. +;devel_warning=True + +# (boolean) Normally ``ansible-playbook`` will print a header for each task that is run. These headers will contain the name: field from the task if you specified one. If you didn't then ``ansible-playbook`` uses the task's action to help you tell which task is presently running. Sometimes you run many of the same action and so you want more information about the task to differentiate it from others of the same action. If you set this variable to True in the config then ``ansible-playbook`` will also include the task's arguments in the header. +# This setting defaults to False because there is a chance that you have sensitive values in your parameters and you do not want those to be printed. +# If you set this to True you should be sure that you have secured your environment's stdout (no one can shoulder surf your screen and you aren't saving stdout to an insecure file) or made sure that all of your playbooks explicitly added the ``no_log: True`` parameter to tasks that have sensitive values :ref:`keep_secret_data` for more information. +;display_args_to_stdout=False + +# (boolean) Toggle to control displaying skipped task/host entries in a task in the default callback. +;display_skipped_hosts=True + +# (string) Root docsite URL used to generate docs URLs in warning/error text; must be an absolute URL with a valid scheme and trailing slash. +;docsite_root_url=https://docs.ansible.com/ansible-core/ + +# (pathspec) Colon-separated paths in which Ansible will search for Documentation Fragments Plugins. 
+;doc_fragment_plugins=/home/sconley/.ansible/plugins/doc_fragments:/usr/share/ansible/plugins/doc_fragments + +# (string) By default, Ansible will issue a warning when a duplicate dict key is encountered in YAML. +# These warnings can be silenced by adjusting this setting to False. +;duplicate_dict_key=warn + +# (string) for the cases in which Ansible needs to return a file within an editor, this chooses the application to use. +;editor=vi + +# (boolean) Whether or not to enable the task debugger, this previously was done as a strategy plugin. +# Now all strategy plugins can inherit this behavior. The debugger defaults to activating when +# a task is failed on unreachable. Use the debugger keyword for more flexibility. +;enable_task_debugger=False + +# (boolean) Toggle to allow missing handlers to become a warning instead of an error when notifying. +;error_on_missing_handler=True + +# (list) Which modules to run during a play's fact gathering stage, using the default of 'smart' will try to figure it out based on connection type. +# If adding your own modules but you still want to use the default Ansible facts, you will want to include 'setup' or corresponding network module to the list (if you add 'smart', Ansible will also figure it out). +# This does not affect explicit calls to the 'setup' module, but does always affect the 'gather_facts' action (implicit or explicit). +;facts_modules=smart + +# (boolean) Set this to "False" if you want to avoid host key checking by the underlying connection plugin Ansible uses to connect to the host. +# Please read the documentation of the specific connection plugin used for details. +;host_key_checking=True + +# (boolean) Facts are available inside the `ansible_facts` variable, this setting also pushes them as their own vars in the main namespace. +# Unlike inside the `ansible_facts` dictionary where the prefix `ansible_` is removed from fact names, these will have the exact names that are returned by the module. +;inject_facts_as_vars=True + +# (string) Path to the Python interpreter to be used for module execution on remote targets, or an automatic discovery mode. Supported discovery modes are ``auto`` (the default), ``auto_silent``, ``auto_legacy``, and ``auto_legacy_silent``. All discovery modes employ a lookup table to use the included system Python (on distributions known to include one), falling back to a fixed ordered list of well-known Python interpreter locations if a platform-specific default is not available. The fallback behavior will issue a warning that the interpreter should be set explicitly (since interpreters installed later may change which one is used). This warning behavior can be disabled by setting ``auto_silent`` or ``auto_legacy_silent``. The value of ``auto_legacy`` provides all the same behavior, but for backward-compatibility with older Ansible releases that always defaulted to ``/usr/bin/python``, will use that interpreter if present. +;interpreter_python=auto + +# (boolean) If 'false', invalid attributes for a task will result in warnings instead of errors. +;invalid_task_attribute_failed=True + +# (boolean) By default, Ansible will issue a warning when there are no hosts in the inventory. +# These warnings can be silenced by adjusting this setting to False. +;localhost_warning=True + +# (int) This will set log verbosity if higher than the normal display verbosity, otherwise it will match that. +;log_verbosity= + +# (int) Maximum size of files to be considered for diff display. 
+;max_diff_size=104448 + +# (list) List of extensions to ignore when looking for modules to load. +# This is for rejecting script and binary module fallback extensions. +;module_ignore_exts=.pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, .rst, .yaml, .yml, .ini + +# (bool) Enables whether module responses are evaluated for containing non-UTF-8 data. +# Disabling this may result in unexpected behavior. +# Only ansible-core should evaluate this configuration. +;module_strict_utf8_response=True + +# (list) TODO: write it +;network_group_modules=eos, nxos, ios, iosxr, junos, enos, ce, vyos, sros, dellos9, dellos10, dellos6, asa, aruba, aireos, bigip, ironware, onyx, netconf, exos, voss, slxos + +# (boolean) Previously Ansible would only clear some of the plugin loading caches when loading new roles, this led to some behaviors in which a plugin loaded in previous plays would be unexpectedly 'sticky'. This setting allows the user to return to that behavior. +;old_plugin_cache_clear=False + +# (string) for the cases in which Ansible needs to return output in a pageable fashion, this chooses the application to use. +;pager=less + +# (path) A number of non-playbook CLIs have a ``--playbook-dir`` argument; this sets the default value for it. +;playbook_dir= + +# (string) This sets which playbook dirs will be used as a root to process vars plugins, which includes finding host_vars/group_vars. +;playbook_vars_root=top + +# (path) A path to configuration for filtering which plugins installed on the system are allowed to be used. +# See :ref:`plugin_filtering_config` for details of the filter file's format. +# The default is /etc/ansible/plugin_filters.yml +;plugin_filters_cfg= + +# (string) Attempts to set RLIMIT_NOFILE soft limit to the specified value when executing Python modules (can speed up subprocess usage on Python 2.x. See https://bugs.python.org/issue11284). The value will be limited by the existing hard limit. Default value of 0 does not attempt to adjust existing system-defined limits. +;python_module_rlimit_nofile=0 + +# (bool) This controls whether a failed Ansible playbook should create a .retry file. +;retry_files_enabled=False + +# (path) This sets the path in which Ansible will save .retry files when a playbook fails and retry files are enabled. +# This file will be overwritten after each run with the list of failed hosts from all plays. +;retry_files_save_path= + +# (str) This setting can be used to optimize vars_plugin usage depending on the user's inventory size and play selection. +;run_vars_plugins=demand + +# (bool) This adds the custom stats set via the set_stats plugin to the default output. +;show_custom_stats=False + +# (string) Action to take when a module parameter value is converted to a string (this does not affect variables). For string parameters, values such as '1.00', "['a', 'b',]", and 'yes', 'y', etc. will be converted by the YAML parser unless fully quoted. +# Valid options are 'error', 'warn', and 'ignore'. +# Since 2.8, this option defaults to 'warn' but will change to 'error' in 2.12. +;string_conversion_action=warn + +# (boolean) Allows disabling of warnings related to potential issues on the system running Ansible itself (not on the managed hosts). +# These may include warnings about third-party packages or other conditions that should be resolved if possible. 
+;system_warnings=True + +# (string) A string to insert into target logging for tracking purposes +;target_log_info= + +# (boolean) This option defines whether the task debugger will be invoked on a failed task when ignore_errors=True is specified. +# True specifies that the debugger will honor ignore_errors, and False will not honor ignore_errors. +;task_debugger_ignore_errors=True + +# (integer) Set the maximum time (in seconds) for a task action to execute in. +# Timeout runs independently from templating or looping. It applies per each attempt of executing the task's action and remains unchanged by the total time spent on a task. +# When the action execution exceeds the timeout, Ansible interrupts the process. This is registered as a failure due to outside circumstances, not a task failure, to receive appropriate response and recovery process. +# If set to 0 (the default) there is no timeout. +;task_timeout=0 + +# (string) Make ansible transform invalid characters in group names supplied by inventory sources. +;force_valid_group_names=never + +# (boolean) Toggles the use of persistence for connections. +;use_persistent_connections=False + +# (bool) A toggle to disable validating a collection's 'metadata' entry for a module_defaults action group. Metadata containing unexpected fields or value types will produce a warning when this is True. +;validate_action_group_metadata=True + +# (list) Accept list for variable plugins that require it. +;vars_plugins_enabled=host_group_vars + +# (list) Allows to change the group variable precedence merge order. +;precedence=all_inventory, groups_inventory, all_plugins_inventory, all_plugins_play, groups_plugins_inventory, groups_plugins_play + +# (string) The salt to use for the vault encryption. If it is not provided, a random salt will be used. +;vault_encrypt_salt= + +# (bool) Force 'verbose' option to use stderr instead of stdout +;verbose_to_stderr=False + +# (integer) For asynchronous tasks in Ansible (covered in Asynchronous Actions and Polling), this is how long, in seconds, to wait for the task spawned by Ansible to connect back to the named pipe used on Windows systems. The default is 5 seconds. This can be too low on slower systems, or systems under heavy load. +# This is not the total time an async command can run for, but is a separate timeout to wait for an async command to start. The task will only start to be timed against its async_timeout once it has connected to the pipe, so the overall maximum duration the task can take will be extended by the amount specified here. +;win_async_startup_timeout=5 + +# (list) Check all of these extensions when looking for 'variable' files which should be YAML or JSON or vaulted versions of these. +# This affects vars_files, include_vars, inventory and vars plugins among others. +;yaml_valid_extensions=.yml, .yaml, .json + + +[privilege_escalation] +# (boolean) Display an agnostic become prompt instead of displaying a prompt containing the command line supplied become method. +;agnostic_become_prompt=True + +# (boolean) When ``False``(default), Ansible will skip using become if the remote user is the same as the become user, as this is normally a redundant operation. In other words root sudo to root. +# If ``True``, this forces Ansible to use the become plugin anyways as there are cases in which this is needed. +;become_allow_same_user=False + +# (boolean) Toggles the use of privilege escalation, allowing you to 'become' another user after login. 
+;become=False + +# (boolean) Toggle to prompt for privilege escalation password. +;become_ask_pass=False + +# (string) executable to use for privilege escalation, otherwise Ansible will depend on PATH. +;become_exe= + +# (string) Flags to pass to the privilege escalation executable. +;become_flags= + +# (string) Privilege escalation method to use when `become` is enabled. +;become_method=sudo + +# (string) The user your login/remote user 'becomes' when using privilege escalation, most systems will use 'root' when no user is specified. +;become_user=root + + +[persistent_connection] +# (path) Specify where to look for the ansible-connection script. This location will be checked before searching $PATH. +# If null, ansible will start with the same directory as the ansible script. +;ansible_connection_path= + +# (int) This controls the amount of time to wait for a response from a remote device before timing out a persistent connection. +;command_timeout=30 + +# (integer) This controls the retry timeout for persistent connection to connect to the local domain socket. +;connect_retry_timeout=15 + +# (integer) This controls how long the persistent connection will remain idle before it is destroyed. +;connect_timeout=30 + +# (path) Path to the socket to be used by the connection persistence system. +;control_path_dir=/home/sconley/.ansible/pc + + +[connection] +# (boolean) This is a global option, each connection plugin can override either by having more specific options or not supporting pipelining at all. +# Pipelining, if supported by the connection plugin, reduces the number of network operations required to execute a module on the remote server, by executing many Ansible modules without actual file transfer. +# It can result in a very significant performance improvement when enabled. +# However this conflicts with privilege escalation (become). For example, when using 'sudo:' operations you must first disable 'requiretty' in /etc/sudoers on all managed hosts, which is why it is disabled by default. +# This setting will be disabled if ``ANSIBLE_KEEP_REMOTE_FILES`` is enabled. +;pipelining=False + + +[colors] +# (string) Defines the color to use on 'Changed' task status. +;changed=yellow + +# (string) Defines the default color to use for ansible-console. +;console_prompt=white + +# (string) Defines the color to use when emitting debug messages. +;debug=dark gray + +# (string) Defines the color to use when emitting deprecation messages. +;deprecate=purple + +# (string) Defines the color to use when showing added lines in diffs. +;diff_add=green + +# (string) Defines the color to use when showing diffs. +;diff_lines=cyan + +# (string) Defines the color to use when showing removed lines in diffs. +;diff_remove=red + +# (string) Defines the color to use when emitting a constant in the ansible-doc output. +;doc_constant=dark gray + +# (string) Defines the color to use when emitting a deprecated value in the ansible-doc output. +;doc_deprecated=magenta + +# (string) Defines the color to use when emitting a link in the ansible-doc output. +;doc_link=cyan + +# (string) Defines the color to use when emitting a module name in the ansible-doc output. +;doc_module=yellow + +# (string) Defines the color to use when emitting a plugin name in the ansible-doc output. +;doc_plugin=yellow + +# (string) Defines the color to use when emitting cross-reference in the ansible-doc output. +;doc_reference=magenta + +# (string) Defines the color to use when emitting error messages. 
+;error=red + +# (string) Defines the color to use for highlighting. +;highlight=white + +# (string) Defines the color to use when showing 'Included' task status. +;included=cyan + +# (string) Defines the color to use when showing 'OK' task status. +;ok=green + +# (string) Defines the color to use when showing 'Skipped' task status. +;skip=cyan + +# (string) Defines the color to use on 'Unreachable' status. +;unreachable=bright red + +# (string) Defines the color to use when emitting verbose messages. In other words, those that show with '-v's. +;verbose=blue + +# (string) Defines the color to use when emitting warning messages. +;warn=bright purple + + +[selinux] +# (boolean) This setting causes libvirt to connect to LXC containers by passing ``--noseclabel`` parameter to ``virsh`` command. This is necessary when running on systems which do not have SELinux. +;libvirt_lxc_noseclabel=False + +# (list) Some filesystems do not support safe operations and/or return inconsistent errors, this setting makes Ansible 'tolerate' those in the list without causing fatal errors. +# Data corruption may occur and writes are not always verified when a filesystem is in the list. +;special_context_filesystems=fuse, nfs, vboxsf, ramfs, 9p, vfat + + +[diff] +# (bool) Configuration toggle to tell modules to show differences when in 'changed' status, equivalent to ``--diff``. +;always=False + +# (integer) Number of lines of context to show when displaying the differences between files. +;context=3 + + +[galaxy] +# (path) The directory that stores cached responses from a Galaxy server. +# This is only used by the ``ansible-galaxy collection install`` and ``download`` commands. +# Cache files inside this dir will be ignored if they are world writable. +;cache_dir=/home/sconley/.ansible/galaxy_cache + +# (bool) whether ``ansible-galaxy collection install`` should warn about ``--collections-path`` missing from configured :ref:`collections_paths`. +;collections_path_warning=True + +# (path) Collection skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy collection``, same as ``--collection-skeleton``. +;collection_skeleton= + +# (list) patterns of files to ignore inside a Galaxy collection skeleton directory. +;collection_skeleton_ignore=^.git$, ^.*/.git_keep$ + +# (bool) Disable GPG signature verification during collection installation. +;disable_gpg_verify=False + +# (bool) Some steps in ``ansible-galaxy`` display a progress wheel which can cause issues on certain displays or when outputting the stdout to a file. +# This config option controls whether the display wheel is shown or not. +# The default is to show the display wheel if stdout has a tty. +;display_progress= + +# (path) Configure the keyring used for GPG signature verification during collection installation and verification. +;gpg_keyring= + +# (boolean) If set to yes, ansible-galaxy will not validate TLS certificates. This can be useful for testing against a server with a self-signed certificate. +;ignore_certs= + +# (list) A list of GPG status codes to ignore during GPG signature verification. See L(https://github.com/gpg/gnupg/blob/master/doc/DETAILS#general-status-codes) for status code descriptions. +# If fewer signatures successfully verify the collection than `GALAXY_REQUIRED_VALID_SIGNATURE_COUNT`, signature verification will fail even if all error codes are ignored. 
+;ignore_signature_status_codes= + +# (str) The number of signatures that must be successful during GPG signature verification while installing or verifying collections. +# This should be a positive integer or all to indicate all signatures must successfully validate the collection. +# Prepend + to the value to fail if no valid signatures are found for the collection. +;required_valid_signature_count=1 + +# (path) Role skeleton directory to use as a template for the ``init`` action in ``ansible-galaxy``/``ansible-galaxy role``, same as ``--role-skeleton``. +;role_skeleton= + +# (list) patterns of files to ignore inside a Galaxy role or collection skeleton directory. +;role_skeleton_ignore=^.git$, ^.*/.git_keep$ + +# (string) URL to prepend when roles don't specify the full URI, assume they are referencing this server as the source. +;server=https://galaxy.ansible.com + +# (list) A list of Galaxy servers to use when installing a collection. +# The value corresponds to the config ini header ``[galaxy_server.{{item}}]`` which defines the server details. +# See :ref:`galaxy_server_config` for more details on how to define a Galaxy server. +# The order of servers in this list is used as the order in which a collection is resolved. +# Setting this config option will ignore the :ref:`galaxy_server` config option. +;server_list= + +# (int) The default timeout for Galaxy API calls. Galaxy servers that don't configure a specific timeout will fall back to this value. +;server_timeout=60 + +# (path) Local path to galaxy access token file +;token_path=/home/sconley/.ansible/galaxy_token + + +[inventory] +# (string) This setting changes the behaviour of mismatched host patterns, it allows you to force a fatal error, a warning or just ignore it. +;host_pattern_mismatch=warning + +# (boolean) If 'true', it is a fatal error when any given inventory source cannot be successfully parsed by any available inventory plugin; otherwise, this situation only attracts a warning. + +;any_unparsed_is_failed=False + +# (bool) Toggle to turn on inventory caching. +# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`. +# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory configuration. +# This message will be removed in 2.16. +;cache=False + +# (string) The plugin for caching inventory. +# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`. +# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration. +# This message will be removed in 2.16. +;cache_plugin= + +# (string) The inventory cache connection. +# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`. +# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration. +# This message will be removed in 2.16. +;cache_connection= + +# (string) The table prefix for the cache plugin. +# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`. +# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration. +# This message will be removed in 2.16. 
+;cache_prefix=ansible_inventory_
+
+# (string) Expiration timeout for the inventory cache plugin data.
+# This setting has been moved to the individual inventory plugins as a plugin option :ref:`inventory_plugins`.
+# The existing configuration settings are still accepted with the inventory plugin adding additional options from inventory and fact cache configuration.
+# This message will be removed in 2.16.
+;cache_timeout=3600
+
+# (list) List of enabled inventory plugins, it also determines the order in which they are used.
+;enable_plugins=host_list, script, auto, yaml, ini, toml
+
+# (bool) Controls if ansible-inventory will accurately reflect Ansible's view into inventory or if it is optimized for exporting.
+;export=False
+
+# (list) List of extensions to ignore when using a directory as an inventory source.
+;ignore_extensions=.pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, .rst, .orig, .ini, .cfg, .retry
+
+# (list) List of patterns to ignore when using a directory as an inventory source.
+;ignore_patterns=
+
+# (bool) If 'true' it is a fatal error if every single potential inventory source fails to parse, otherwise, this situation will only attract a warning.
+
+;unparsed_is_failed=False
+
+# (boolean) By default, Ansible will issue a warning when no inventory was loaded and notes that it will use an implicit localhost-only inventory.
+# These warnings can be silenced by adjusting this setting to False.
+;inventory_unparsed_warning=True
+
+
+[netconf_connection]
+# (string) This variable is used to enable bastion/jump host with netconf connection. If set to True the bastion/jump host ssh settings should be present in ~/.ssh/config file, alternatively it can be set to custom ssh configuration file path to read the bastion/jump host settings.
+;ssh_config=
+
+
+[paramiko_connection]
+# (boolean) TODO: write it
+;host_key_auto_add=False
+
+# (boolean) TODO: write it
+;look_for_keys=True
+
+
+[jinja2]
+# (list) This list of filters avoids 'type conversion' when templating variables.
+# Useful when you want to avoid conversion into lists or dictionaries for JSON strings, for example.
+;dont_type_filters=string, to_json, to_nice_json, to_yaml, to_nice_yaml, ppretty, json
+
+
+[tags]
+# (list) default list of tags to run in your plays, Skip Tags has precedence.
+;run= + +# (list) default list of tags to skip in your plays, has precedence over Run Tags +;skip= + diff --git a/infra/azure-setup.yml b/infra/azure-setup.yml new file mode 100644 index 00000000..f1da7c97 --- /dev/null +++ b/infra/azure-setup.yml @@ -0,0 +1,174 @@ +- name: Create Azure Kubernetes Service + hosts: localhost + connection: local + collections: + - azure.azcollection + tasks: + - name: Create resource group + azure_rm_resourcegroup: + name: "{{ resource_group }}" + location: "{{ location }}" + + - name: Create a managed Azure Container Services (AKS) cluster + register: aks + azure_rm_aks: + name: "{{ aks_name }}" + location: "{{ location }}" + resource_group: "{{ resource_group }}" + dns_prefix: "{{ aks_name }}-dns" + kubernetes_version: "{{ kubernetes_version }}" + service_principal: + client_id: "{{ service_client_id }}" + client_secret: "{{ service_client_secret }}" + enable_rbac: true + auto_upgrade_profile: + node_os_upgrade_channel: NodeImage + upgrade_channel: patch + disable_local_accounts: false + network_profile: + network_plugin: kubenet + load_balancer_sku: standard + network_policy: calico + #pod_cidr: "10.244.0.0/16" + #service_cidr: "10.0.0.0/16" + security_profile: + image_cleaner: + enabled: false + interval_hours: 168 + agent_pool_profiles: + - name: system + count: 1 + vm_size: Standard_D2s_v3 + enable_auto_scaling: true + max_count: 4 + max_pods: 100 + min_count: 1 + mode: System + orchestrator_version: "{{ kubernetes_version }}" + os_disk_size_gb: 128 + os_sku: Ubuntu + os_type: Linux + type: VirtualMachineScaleSets + storage_profiles: ManagedDisks + + - name: general + count: 1 + vm_size: Standard_D4s_v3 + enable_auto_scaling: true + max_count: 4 + max_pods: 100 + min_count: 1 + mode: User + node_labels: + "k8s.datacoves.com/nodegroup-kind": general + orchestrator_version: "{{ kubernetes_version }}" + os_disk_size_gb: 128 + os_sku: Ubuntu + os_type: Linux + type: VirtualMachineScaleSets + storage_profiles: ManagedDisks + + - name: volumed + count: 1 + vm_size: Standard_D16s_v5 + enable_auto_scaling: true + max_count: 4 + max_pods: 100 + min_count: 1 + mode: User + node_labels: + "k8s.datacoves.com/nodegroup-kind": volumed + orchestrator_version: "{{ kubernetes_version }}" + os_disk_size_gb: 512 + os_sku: Ubuntu + os_type: Linux + type: VirtualMachineScaleSets + storage_profiles: ManagedDisks + + - name: workers + count: 1 + vm_size: Standard_D4s_v3 + enable_auto_scaling: true + max_count: 4 + max_pods: 100 + min_count: 1 + mode: User + node_labels: + "k8s.datacoves.com/workers": enabled + orchestrator_version: "{{ kubernetes_version }}" + os_disk_size_gb: 128 + os_sku: Ubuntu + os_type: Linux + type: VirtualMachineScaleSets + storage_profiles: ManagedDisks + + - name: Create Kubeconfig file + copy: + content: "{{ aks['kube_config'][0] }}" + dest: "../{{ aks_name }}.kubeconfig" + + - name: Get Networking Info + register: network + azure_rm_virtualnetwork_info: + resource_group: "MC_{{ resource_group }}_{{ aks_name }}_{{ location }}" + + - name: Create PostgreSQL Subnet + register: db_subnet + azure_rm_subnet: + resource_group: "MC_{{ resource_group }}_{{ aks_name }}_{{ location }}" + virtual_network_name: "{{ network['virtualnetworks'][0]['name'] }}" + name: db-subnet + address_prefix_cidr: "10.225.0.0/24" + private_endpoint_network_policies: Disabled + private_link_service_network_policies: Enabled + delegations: + - name: "Microsoft.DBforPostgreSQL/flexibleServers" + actions: + - "Microsoft.Network/virtualNetworks/subnets/join/action" + serviceName: 
"Microsoft.DBforPostgreSQL/flexibleServers" + + - name: Create Private DNS Zone + register: private_dns + azure_rm_privatednszone: + resource_group: "MC_{{ resource_group }}_{{ aks_name }}_{{ location }}" + name: "{{ aks_name }}.private.postgres.database.azure.com" + + - name: Link Private DNS Zone to Virtual Network + azure_rm_privatednszonelink: + resource_group: "MC_{{ resource_group }}_{{ aks_name }}_{{ location }}" + virtual_network: "{{ network['virtualnetworks'][0]['id'] }}" + zone_name: "{{ private_dns['state']['name'] }}" + name: "why-must-i-name-this-{{ aks_name }}" + registration_enabled: true + + - name: Dump AKS + debug: + var: private_dns + + - name: Create PostgreSQL Database + register: db + azure_rm_postgresqlflexibleserver: + administrator_login: datacoves_admin + administrator_login_password: "{{ database_password }}" + backup: + backup_retention_days: 7 + geo_redundant_backup: Disabled + high_availability: + mode: Disabled + location: "{{ location }}" + resource_group: "{{ resource_group }}" + name: "{{ aks_name }}-db" + network: + delegated_subnet_resource_id: "{{ db_subnet['state']['id'] }}" + private_dns_zone_arm_resource_id: "{{ private_dns['state']['id'] }}" + public_network_access: Disabled + sku: + name: "Standard_D2ads_v5" + tier: GeneralPurpose + storage: + storage_size_gb: 128 + version: 13 + + +- name: Call Datacoves Install Playbook + ansible.builtin.import_playbook: "datacoves-install.yml" diff --git a/infra/azure-teardown.yml b/infra/azure-teardown.yml new file mode 100644 index 00000000..bb1fc379 --- /dev/null +++ b/infra/azure-teardown.yml @@ -0,0 +1,12 @@ +- name: Delete Azure Kubernetes Service + hosts: localhost + collections: + - azure.azcollection + connection: local + tasks: + - name: Delete resource group + azure_rm_resourcegroup: + name: "{{ resource_group }}" + location: "{{ location }}" + state: absent + force_delete_nonempty: true diff --git a/infra/base/cert-manager-v1.11.0.yaml b/infra/base/cert-manager-v1.11.0.yaml new file mode 100644 index 00000000..1a56ff3b --- /dev/null +++ b/infra/base/cert-manager-v1.11.0.yaml @@ -0,0 +1,5546 @@ +# https://github.com/jetstack/cert-manager/releases/download/v1.11.0/cert-manager.yaml + +# Copyright 2022 The cert-manager Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: Namespace +metadata: + name: cert-manager +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: clusterissuers.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: cert-manager.io + names: + kind: ClusterIssuer + listKind: ClusterIssuerList + plural: clusterissuers + singular: clusterissuer + categories: + - cert-manager + scope: Cluster + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: A ClusterIssuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is similar to an Issuer, however it is cluster-scoped and therefore can be referenced by resources that exist in *any* namespace, not just the same namespace as the referent. + type: object + required: + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: Desired state of the ClusterIssuer resource. + type: object + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + type: object + required: + - privateKeySecretRef + - server + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which can be used to validate the certificate chain presented by the ACME server. Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various kinds of security vulnerabilities. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. + type: string + format: byte + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. 
It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + type: object + required: + - keyID + - keySecretRef + properties: + keyAlgorithm: + description: "Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme." + type: string + enum: + - HS256 + - HS384 + - HS512 + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + type: string + maxLength: 64 + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". 
Only ACME v2 endpoints (i.e. RFC 8555) are supported.' + type: string + skipTLSVerify: + description: "INSECURE: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have the TLS certificate chain validated. Mutually exclusive with CABundle; prefer using CABundle to prevent various kinds of security vulnerabilities. Only enable this option in development environments. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. Defaults to false." + type: boolean + solvers: + description: "Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/" + type: array + items: + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. 
In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: "API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions." 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: "The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``." + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. 
+ type: object + required: + - region + properties: + accessKeyID: + description: "The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: string + accessKeyIDSecretRef: + description: "The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: "The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: "When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways" + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. 
\n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. 
+ type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + nodeSelector: + description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
+ type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + type: object + required: + - secretName + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + type: array + items: + type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + type: object + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + type: array + items: + type: string + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + type: object + required: + - auth + - path + - server + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + type: object + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. 
+ type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by Vault. Only used if using HTTPS to connect to Vault and ignored for HTTP connections. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. + type: string + format: byte + caBundleSecretRef: + description: Reference to a Secret containing a bundle of PEM-encoded CAs to use when verifying the certificate chain presented by Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' 
+ type: string + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. If undefined, the certificate bundle in the cert-manager controller container is used to validate the chain. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + status: + description: Status of the ClusterIssuer. This is set and managed automatically. + type: object + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + type: array + items: + description: IssuerCondition contains condition information for an Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. 
+ type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: challenges.acme.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: acme.cert-manager.io + names: + kind: Challenge + listKind: ChallengeList + plural: challenges + singular: challenge + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.dnsName + name: Domain + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + jsonPath: .metadata.creationTimestamp + name: Age + type: date + name: v1 + schema: + openAPIV3Schema: + description: Challenge is a type to represent a Challenge request with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + type: object + required: + - authorizationURL + - dnsName + - issuerRef + - key + - solver + - token + - type + - url + properties: + authorizationURL: + description: The URL to the ACME Authorization resource that this challenge is a part of. + type: string + dnsName: + description: dnsName is the identifier that this challenge is for, e.g. example.com. If the requested DNSName is a 'wildcard', this field MUST be set to the non-wildcard domain, e.g. for `*.example.com`, it must be `example.com`. 
+ type: string + issuerRef: + description: References a properly configured ACME-type Issuer which should be used to create this Challenge. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Challenge will be marked as failed. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + key: + description: "The ACME challenge key for this challenge For HTTP01 challenges, this is the value that must be responded with to complete the HTTP01 challenge in the format: `.`. For DNS01 challenges, this is the base64 encoded SHA256 sum of the `.` text that must be set as the TXT record content." + type: string + solver: + description: Contains the domain solving configuration that should be used to solve this challenge resource. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: "API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions." 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: "The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``." + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. 
+ type: object + required: + - region + properties: + accessKeyID: + description: "The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: string + accessKeyIDSecretRef: + description: "The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: "The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: "When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways" + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. 
\n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. 
+ type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + nodeSelector: + description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. 
+ type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + token: + description: The ACME challenge token for this challenge. This is the raw value returned from the ACME server. + type: string + type: + description: The type of ACME challenge this resource represents. One of "HTTP-01" or "DNS-01". + type: string + enum: + - HTTP-01 + - DNS-01 + url: + description: The URL of the ACME Challenge resource for this challenge. This can be used to lookup details about the status of this challenge. + type: string + wildcard: + description: wildcard will be true if this challenge is for a wildcard identifier, for example '*.example.com'. + type: boolean + status: + type: object + properties: + presented: + description: presented will be set to true if the challenge values for this challenge are currently 'presented'. This *does not* imply the self check is passing. Only that the values have been 'submitted' for the appropriate challenge mechanism (i.e. the DNS01 TXT record has been presented, or the HTTP01 configuration has been configured). + type: boolean + processing: + description: Used to denote whether this challenge should be processed or not. This field will only be set to true by the 'scheduling' component. It will only be set to false by the 'challenges' controller, after the challenge has reached a final state or timed out. If this field is set to false, the challenge controller will not take any more action. + type: boolean + reason: + description: Contains human readable information on why the Challenge is in the current state. + type: string + state: + description: Contains the current 'state' of the challenge. If not set, the state of the challenge is unknown. + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + served: true + storage: true + subresources: + status: {} +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: certificaterequests.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: cert-manager.io + names: + kind: CertificateRequest + listKind: CertificateRequestList + plural: certificaterequests + shortNames: + - cr + - crs + singular: certificaterequest + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Approved")].status + name: Approved + type: string + - jsonPath: .status.conditions[?(@.type=="Denied")].status + name: Denied + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + type: string + - jsonPath: .spec.username + name: Requestor + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. 
+ name: Age + type: date + schema: + openAPIV3Schema: + description: "A CertificateRequest is used to request a signed certificate from one of the configured issuers. \n All fields within the CertificateRequest's `spec` are immutable after creation. A CertificateRequest will either succeed or fail, as denoted by its `status.state` field. \n A CertificateRequest is a one-shot resource, meaning it represents a single point in time request for a certificate and cannot be re-used." + type: object + required: + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: Desired state of the CertificateRequest resource. + type: object + required: + - issuerRef + - request + properties: + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. + type: string + extra: + description: Extra contains extra attributes of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: object + additionalProperties: + type: array + items: + type: string + groups: + description: Groups contains group membership of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: array + items: + type: string + x-kubernetes-list-type: atomic + isCA: + description: IsCA will request to mark the certificate as valid for certificate signing when submitting to the issuer. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this CertificateRequest. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the CertificateRequest will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. The group field refers to the API group of the issuer which defaults to `cert-manager.io` if empty. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + request: + description: The PEM-encoded x509 certificate signing request to be submitted to the CA for signing. + type: string + format: byte + uid: + description: UID contains the uid of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. 
If usages are set they SHOULD be encoded inside the CSR spec Defaults to `digital signature` and `key encipherment` if not specified. + type: array + items: + description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + username: + description: Username contains the name of the user that created the CertificateRequest. Populated by the cert-manager webhook on creation and immutable. + type: string + status: + description: Status of the CertificateRequest. This is set and managed automatically. + type: object + properties: + ca: + description: The PEM encoded x509 certificate of the signer, also known as the CA (Certificate Authority). This is set on a best-effort basis by different issuers. If not set, the CA is assumed to be unknown/not available. + type: string + format: byte + certificate: + description: The PEM encoded x509 certificate resulting from the certificate signing request. If not set, the CertificateRequest has either not been completed or has failed. More information on failure can be found by checking the `conditions` field. + type: string + format: byte + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready` and `InvalidRequest`. + type: array + items: + description: CertificateRequestCondition contains condition information for a CertificateRequest. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`, `InvalidRequest`, `Approved`, `Denied`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failureTime: + description: FailureTime stores the time that this CertificateRequest failed. This is used to influence garbage collection and back-off. 
+ type: string + format: date-time + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: issuers.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: cert-manager.io + names: + kind: Issuer + listKind: IssuerList + plural: issuers + singular: issuer + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: An Issuer represents a certificate issuing authority which can be referenced as part of `issuerRef` fields. It is scoped to a single namespace and can therefore only be referenced by resources within the same namespace. + type: object + required: + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: Desired state of the Issuer resource. + type: object + properties: + acme: + description: ACME configures this issuer to communicate with a RFC8555 (ACME) server to obtain signed x509 certificates. + type: object + required: + - privateKeySecretRef + - server + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which can be used to validate the certificate chain presented by the ACME server. Mutually exclusive with SkipTLSVerify; prefer using CABundle to prevent various kinds of security vulnerabilities. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. + type: string + format: byte + disableAccountKeyGeneration: + description: Enables or disables generating a new ACME account key. If true, the Issuer resource will *not* request a new account but will expect the account key to be supplied via an existing secret. If false, the cert-manager system will generate a new ACME account key for the Issuer. Defaults to false. + type: boolean + email: + description: Email is the email address to be associated with the ACME account. This field is optional, but it is strongly recommended to be set. It will be used to contact you in case of issues with your account or certificates, including expiry notification emails. 
This field may be updated after the account is initially registered. + type: string + enableDurationFeature: + description: Enables requesting a Not After date on certificates that matches the duration of the certificate. This is not supported by all ACME servers like Let's Encrypt. If set to true when the ACME server does not support it it will create an error on the Order. Defaults to false. + type: boolean + externalAccountBinding: + description: ExternalAccountBinding is a reference to a CA external account of the ACME server. If set, upon registration cert-manager will attempt to associate the given external account credentials with the registered ACME account. + type: object + required: + - keyID + - keySecretRef + properties: + keyAlgorithm: + description: "Deprecated: keyAlgorithm field exists for historical compatibility reasons and should not be used. The algorithm is now hardcoded to HS256 in golang/x/crypto/acme." + type: string + enum: + - HS256 + - HS384 + - HS512 + keyID: + description: keyID is the ID of the CA key that the External Account is bound to. + type: string + keySecretRef: + description: keySecretRef is a Secret Key Selector referencing a data item in a Kubernetes Secret which holds the symmetric MAC key of the External Account Binding. The `key` is the index string that is paired with the key data in the Secret and should not be confused with the key data itself, or indeed with the External Account Binding keyID above. The secret key stored in the Secret **must** be un-padded, base64 URL encoded data. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + preferredChain: + description: 'PreferredChain is the chain to use if the ACME server outputs multiple. PreferredChain is no guarantee that this one gets delivered by the ACME endpoint. For example, for Let''s Encrypt''s DST crosssign you would use: "DST Root CA X3" or "ISRG Root X1" for the newer Let''s Encrypt root CA. This value picks the first certificate bundle in the ACME alternative chains that has a certificate with this value as its issuer''s CN' + type: string + maxLength: 64 + privateKeySecretRef: + description: PrivateKey is the name of a Kubernetes Secret resource that will be used to store the automatically generated ACME account private key. Optionally, a `key` may be specified to select a specific entry within the named Secret resource. If `key` is not specified, a default of `tls.key` will be used. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + server: + description: 'Server is the URL used to access the ACME server''s ''directory'' endpoint. For example, for Let''s Encrypt''s staging endpoint, you would use: "https://acme-staging-v02.api.letsencrypt.org/directory". Only ACME v2 endpoints (i.e. RFC 8555) are supported.' 
+ type: string + skipTLSVerify: + description: "INSECURE: Enables or disables validation of the ACME server TLS certificate. If true, requests to the ACME server will not have the TLS certificate chain validated. Mutually exclusive with CABundle; prefer using CABundle to prevent various kinds of security vulnerabilities. Only enable this option in development environments. If CABundle and SkipTLSVerify are unset, the system certificate bundle inside the container is used to validate the TLS connection. Defaults to false." + type: boolean + solvers: + description: "Solvers is a list of challenge solvers that will be used to solve ACME challenges for the matching domains. Solver configurations must be provided in order to obtain certificates from an ACME server. For more information, see: https://cert-manager.io/docs/configuration/acme/" + type: array + items: + description: An ACMEChallengeSolver describes how to solve ACME challenges for the issuer it is part of. A selector may be provided to use different solving strategies for different DNS names. Only one of HTTP01 or DNS01 must be provided. + type: object + properties: + dns01: + description: Configures cert-manager to attempt to complete authorizations by performing the DNS01 challenge flow. + type: object + properties: + acmeDNS: + description: Use the 'ACME DNS' (https://github.com/joohoi/acme-dns) API to manage DNS01 challenge records. + type: object + required: + - accountSecretRef + - host + properties: + accountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + host: + type: string + akamai: + description: Use the Akamai DNS zone management API to manage DNS01 challenge records. + type: object + required: + - accessTokenSecretRef + - clientSecretSecretRef + - clientTokenSecretRef + - serviceConsumerDomain + properties: + accessTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientSecretSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + clientTokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + serviceConsumerDomain: + type: string + azureDNS: + description: Use the Microsoft Azure DNS API to manage DNS01 challenge records. + type: object + required: + - resourceGroupName + - subscriptionID + properties: + clientID: + description: if both this and ClientSecret are left unset MSI will be used + type: string + clientSecretSecretRef: + description: if both this and ClientID are left unset MSI will be used + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + environment: + description: name of the Azure environment (default AzurePublicCloud) + type: string + enum: + - AzurePublicCloud + - AzureChinaCloud + - AzureGermanCloud + - AzureUSGovernmentCloud + hostedZoneName: + description: name of the DNS zone that should be used + type: string + managedIdentity: + description: managed identity configuration, can not be used at the same time as clientID, clientSecretSecretRef or tenantID + type: object + properties: + clientID: + description: client ID of the managed identity, can not be used at the same time as resourceID + type: string + resourceID: + description: resource ID of the managed identity, can not be used at the same time as clientID + type: string + resourceGroupName: + description: resource group the DNS zone is located in + type: string + subscriptionID: + description: ID of the Azure subscription + type: string + tenantID: + description: when specifying ClientID and ClientSecret then this field is also needed + type: string + cloudDNS: + description: Use the Google Cloud DNS API to manage DNS01 challenge records. + type: object + required: + - project + properties: + hostedZoneName: + description: HostedZoneName is an optional field that tells cert-manager in which Cloud DNS zone the challenge record has to be created. If left empty cert-manager will automatically choose a zone. + type: string + project: + type: string + serviceAccountSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + cloudflare: + description: Use the Cloudflare API to manage DNS01 challenge records. + type: object + properties: + apiKeySecretRef: + description: "API key to use to authenticate with Cloudflare. Note: using an API token to authenticate is now the recommended method as it allows greater control of permissions." 
+ type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + apiTokenSecretRef: + description: API token used to authenticate with Cloudflare. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + email: + description: Email of the account, only required when using API key based authentication. + type: string + cnameStrategy: + description: CNAMEStrategy configures how the DNS01 provider should handle CNAME records when found in DNS zones. + type: string + enum: + - None + - Follow + digitalocean: + description: Use the DigitalOcean DNS API to manage DNS01 challenge records. + type: object + required: + - tokenSecretRef + properties: + tokenSecretRef: + description: A reference to a specific 'key' within a Secret resource. In some instances, `key` is a required field. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + rfc2136: + description: Use RFC2136 ("Dynamic Updates in the Domain Name System") (https://datatracker.ietf.org/doc/rfc2136/) to manage DNS01 challenge records. + type: object + required: + - nameserver + properties: + nameserver: + description: The IP address or hostname of an authoritative DNS server supporting RFC2136 in the form host:port. If the host is an IPv6 address it must be enclosed in square brackets (e.g [2001:db8::1]) ; port is optional. This field is required. + type: string + tsigAlgorithm: + description: "The TSIG Algorithm configured in the DNS supporting RFC2136. Used only when ``tsigSecretSecretRef`` and ``tsigKeyName`` are defined. Supported values are (case-insensitive): ``HMACMD5`` (default), ``HMACSHA1``, ``HMACSHA256`` or ``HMACSHA512``." + type: string + tsigKeyName: + description: The TSIG Key name configured in the DNS. If ``tsigSecretSecretRef`` is defined, this field is required. + type: string + tsigSecretSecretRef: + description: The name of the secret containing the TSIG value. If ``tsigKeyName`` is defined, this field is required. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + route53: + description: Use the AWS Route53 API to manage DNS01 challenge records. 
+ type: object + required: + - region + properties: + accessKeyID: + description: "The AccessKeyID is used for authentication. Cannot be set when SecretAccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: string + accessKeyIDSecretRef: + description: "The SecretAccessKey is used for authentication. If set, pull the AWS access key ID from a key within a Kubernetes Secret. Cannot be set when AccessKeyID is set. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + hostedZoneID: + description: If set, the provider will manage only this zone in Route53 and will not do an lookup using the route53:ListHostedZonesByName api call. + type: string + region: + description: Always set the region when using AccessKeyID and SecretAccessKey + type: string + role: + description: Role is a Role ARN which the Route53 provider will assume using either the explicit credentials AccessKeyID/SecretAccessKey or the inferred credentials from environment variables, shared credentials file or AWS Instance metadata + type: string + secretAccessKeySecretRef: + description: "The SecretAccessKey is used for authentication. If neither the Access Key nor Key ID are set, we fall-back to using env vars, shared credentials file or AWS Instance metadata, see: https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials" + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + webhook: + description: Configure an external webhook based DNS01 challenge solver to manage DNS01 challenge records. + type: object + required: + - groupName + - solverName + properties: + config: + description: Additional configuration that should be passed to the webhook apiserver when challenges are processed. This can contain arbitrary JSON data. Secret values should not be specified in this stanza. If secret values are needed (e.g. credentials for a DNS service), you should use a SecretKeySelector to reference a Secret resource. For details on the schema of this field, consult the webhook provider implementation's documentation. + x-kubernetes-preserve-unknown-fields: true + groupName: + description: The API group name that should be used when POSTing ChallengePayload resources to the webhook apiserver. This should be the same as the GroupName specified in the webhook provider implementation. 
+ type: string + solverName: + description: The name of the solver to use, as defined in the webhook provider implementation. This will typically be the name of the provider, e.g. 'cloudflare'. + type: string + http01: + description: Configures cert-manager to attempt to complete authorizations by performing the HTTP01 challenge flow. It is not possible to obtain certificates for wildcard domain names (e.g. `*.example.com`) using the HTTP01 challenge mechanism. + type: object + properties: + gatewayHTTPRoute: + description: The Gateway API is a sig-network community API that models service networking in Kubernetes (https://gateway-api.sigs.k8s.io/). The Gateway solver will create HTTPRoutes with the specified labels in the same namespace as the challenge. This solver is experimental, and fields / behaviour may change in the future. + type: object + properties: + labels: + description: Custom labels that will be applied to HTTPRoutes created by cert-manager while solving HTTP-01 challenges. + type: object + additionalProperties: + type: string + parentRefs: + description: "When solving an HTTP-01 challenge, cert-manager creates an HTTPRoute. cert-manager needs to know which parentRefs should be used when creating the HTTPRoute. Usually, the parentRef references a Gateway. See: https://gateway-api.sigs.k8s.io/api-types/httproute/#attaching-to-gateways" + type: array + items: + description: "ParentReference identifies an API object (usually a Gateway) that can be considered a parent of this resource (usually a route). The only kind of parent resource with \"Core\" support is Gateway. This API may be extended in the future to support additional kinds of parent resources, such as HTTPRoute. \n The API object must be valid in the cluster; the Group and Kind must be registered in the cluster for this reference to be valid." + type: object + required: + - name + properties: + group: + description: "Group is the group of the referent. When unspecified, \"gateway.networking.k8s.io\" is inferred. To set the core API group (such as for a \"Service\" kind referent), Group must be explicitly set to \"\" (empty string). \n Support: Core" + type: string + default: gateway.networking.k8s.io + maxLength: 253 + pattern: ^$|^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + kind: + description: "Kind is kind of the referent. \n Support: Core (Gateway) \n Support: Implementation-specific (Other Resources)" + type: string + default: Gateway + maxLength: 63 + minLength: 1 + pattern: ^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$ + name: + description: "Name is the name of the referent. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + namespace: + description: "Namespace is the namespace of the referent. When unspecified, this refers to the local namespace of the Route. \n Note that there are specific rules for ParentRefs which cross namespace boundaries. Cross-namespace references are only valid if they are explicitly allowed by something in the namespace they are referring to. For example: Gateway has the AllowedRoutes field, and ReferenceGrant provides a generic way to enable any other kind of cross-namespace reference. \n Support: Core" + type: string + maxLength: 63 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + port: + description: "Port is the network port this Route targets. It can be interpreted differently based on the type of parent resource. 
\n When the parent resource is a Gateway, this targets all listeners listening on the specified port that also support this kind of Route(and select this Route). It's not recommended to set `Port` unless the networking behaviors specified in a Route must apply to a specific port as opposed to a listener(s) whose port(s) may be changed. When both Port and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support other parent resources. Implementations supporting other types of parent resources MUST clearly document how/if Port is interpreted. \n For the purpose of status, an attachment is considered successful as long as the parent resource accepts it partially. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Extended \n " + type: integer + format: int32 + maximum: 65535 + minimum: 1 + sectionName: + description: "SectionName is the name of a section within the target resource. In the following resources, SectionName is interpreted as the following: \n * Gateway: Listener Name. When both Port (experimental) and SectionName are specified, the name and port of the selected listener must match both specified values. \n Implementations MAY choose to support attaching Routes to other resources. If that is the case, they MUST clearly document how SectionName is interpreted. \n When unspecified (empty string), this will reference the entire resource. For the purpose of status, an attachment is considered successful if at least one section in the parent resource accepts it. For example, Gateway listeners can restrict which Routes can attach to them by Route kind, namespace, or hostname. If 1 of 2 Gateway listeners accept attachment from the referencing Route, the Route MUST be considered successfully attached. If no Gateway listeners accept attachment from this Route, the Route MUST be considered detached from the Gateway. \n Support: Core" + type: string + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + ingress: + description: The ingress based HTTP01 challenge solver will solve challenges by creating or modifying Ingress resources in order to route requests for '/.well-known/acme-challenge/XYZ' to 'challenge solver' pods that are provisioned by cert-manager for each Challenge to be completed. + type: object + properties: + class: + description: The ingress class to use when creating Ingress resources to solve ACME challenges that use this challenge solver. Only one of 'class' or 'name' may be specified. + type: string + ingressTemplate: + description: Optional ingress template used to configure the ACME challenge solver ingress used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the ingress used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. 
+ type: object + properties: + annotations: + description: Annotations that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver ingress. + type: object + additionalProperties: + type: string + name: + description: The name of the ingress resource that should have ACME challenge solving routes inserted into it in order to solve HTTP01 challenges. This is typically used in conjunction with ingress controllers like ingress-gce, which maintains a 1:1 mapping between external IPs and ingress resources. + type: string + podTemplate: + description: Optional pod template used to configure the ACME challenge solver pods used for HTTP01 challenges. + type: object + properties: + metadata: + description: ObjectMeta overrides for the pod used to solve HTTP01 challenges. Only the 'labels' and 'annotations' fields may be set. If labels or annotations overlap with in-built values, the values here will override the in-built values. + type: object + properties: + annotations: + description: Annotations that should be added to the create ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + labels: + description: Labels that should be added to the created ACME HTTP01 solver pods. + type: object + additionalProperties: + type: string + spec: + description: PodSpec defines overrides for the HTTP01 challenge solver pod. Only the 'priorityClassName', 'nodeSelector', 'affinity', 'serviceAccountName' and 'tolerations' fields are supported currently. All other fields will be ignored. + type: object + properties: + affinity: + description: If specified, the pod's scheduling constraints + type: object + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + type: array + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + type: object + required: + - preference + - weight + properties: + preference: + description: A node selector term, associated with the corresponding weight. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
+ type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + type: object + required: + - nodeSelectorTerms + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + type: array + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + type: object + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchFields: + description: A list of node selector requirements by node's fields. + type: array + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + type: array + items: + type: string + x-kubernetes-map-type: atomic + x-kubernetes-map-type: atomic + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. 
The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". 
+ type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + type: object + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + type: array + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + type: object + required: + - podAffinityTerm + - weight + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. 
+ type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + type: integer + format: int32 + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + type: array + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + type: object + required: + - topologyKey + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaceSelector: + description: A label query over the set of namespaces that the term applies to. The term is applied to the union of the namespaces selected by this field and the ones listed in the namespaces field. null selector and null or empty namespaces list means "this pod's namespace". An empty selector ({}) matches all namespaces. + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + x-kubernetes-map-type: atomic + namespaces: + description: namespaces specifies a static list of namespace names that the term applies to. The term is applied to the union of the namespaces listed in this field and the ones selected by namespaceSelector. null or empty namespaces list and null namespaceSelector means "this pod's namespace". + type: array + items: + type: string + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + nodeSelector: + description: "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" + type: object + additionalProperties: + type: string + priorityClassName: + description: If specified, the pod's priorityClassName. + type: string + serviceAccountName: + description: If specified, the pod's service account + type: string + tolerations: + description: If specified, the pod's tolerations. + type: array + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>. + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + serviceType: + description: Optional service type for Kubernetes solver service. Supported values are NodePort or ClusterIP. If unset, defaults to NodePort. + type: string + selector: + description: Selector selects a set of DNSNames on the Certificate resource that should be solved using this challenge solver. If not specified, the solver will be treated as the 'default' solver with the lowest priority, i.e. if any other solver has a more specific match, it will be used instead. + type: object + properties: + dnsNames: + description: List of DNSNames that this solver will be used to solve. If specified and a match is found, a dnsNames selector will take precedence over a dnsZones selector. If multiple solvers match with the same dnsNames value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected. + type: array + items: + type: string + dnsZones: + description: List of DNSZones that this solver will be used to solve. The most specific DNS zone match specified here will take precedence over other DNS zone matches, so a solver specifying sys.example.com will be selected over one specifying example.com for the domain www.sys.example.com. If multiple solvers match with the same dnsZones value, the solver with the most matching labels in matchLabels will be selected. If neither has more matches, the solver defined earlier in the list will be selected.
+ type: array + items: + type: string + matchLabels: + description: A label selector that is used to refine the set of certificate's that this challenge solver will apply to. + type: object + additionalProperties: + type: string + ca: + description: CA configures this issuer to sign certificates using a signing CA keypair stored in a Secret resource. This is used to build internal PKIs that are managed by cert-manager. + type: object + required: + - secretName + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set, certificates will be issued without distribution points set. + type: array + items: + type: string + ocspServers: + description: The OCSP server list is an X.509 v3 extension that defines a list of URLs of OCSP responders. The OCSP responders can be queried for the revocation status of an issued certificate. If not set, the certificate will be issued with no OCSP servers set. For example, an OCSP server URL could be "http://ocsp.int-x3.letsencrypt.org". + type: array + items: + type: string + secretName: + description: SecretName is the name of the secret used to sign Certificates issued by this Issuer. + type: string + selfSigned: + description: SelfSigned configures this issuer to 'self sign' certificates using the private key used to create the CertificateRequest object. + type: object + properties: + crlDistributionPoints: + description: The CRL distribution points is an X.509 v3 certificate extension which identifies the location of the CRL from which the revocation of this certificate can be checked. If not set certificate will be issued without CDP. Values are strings. + type: array + items: + type: string + vault: + description: Vault configures this issuer to sign certificates using a HashiCorp Vault PKI backend. + type: object + required: + - auth + - path + - server + properties: + auth: + description: Auth configures how cert-manager authenticates with the Vault server. + type: object + properties: + appRole: + description: AppRole authenticates with Vault using the App Role auth mechanism, with the role and secret stored in a Kubernetes Secret resource. + type: object + required: + - path + - roleId + - secretRef + properties: + path: + description: 'Path where the App Role authentication backend is mounted in Vault, e.g: "approle"' + type: string + roleId: + description: RoleID configured in the App Role authentication backend when setting up the authentication backend in Vault. + type: string + secretRef: + description: Reference to a key in a Secret that contains the App Role secret used to authenticate with Vault. The `key` field must be specified and denotes which entry within the Secret resource is used as the app role secret. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + kubernetes: + description: Kubernetes authenticates with Vault by passing the ServiceAccount token stored in the named Secret resource to the Vault server. 
+ type: object + required: + - role + - secretRef + properties: + mountPath: + description: The Vault mountPath here is the mount path to use when authenticating with Vault. For example, setting a value to `/v1/auth/foo`, will use the path `/v1/auth/foo/login` to authenticate with Vault. If unspecified, the default value "/v1/auth/kubernetes" will be used. + type: string + role: + description: A required field containing the Vault Role to assume. A Role binds a Kubernetes ServiceAccount with a set of Vault policies. + type: string + secretRef: + description: The required Secret field containing a Kubernetes ServiceAccount JWT used for authenticating with Vault. Use of 'ambient credentials' is not supported. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + tokenSecretRef: + description: TokenSecretRef authenticates with Vault by presenting a token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by Vault. Only used if using HTTPS to connect to Vault and ignored for HTTP connections. Mutually exclusive with CABundleSecretRef. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. + type: string + format: byte + caBundleSecretRef: + description: Reference to a Secret containing a bundle of PEM-encoded CAs to use when verifying the certificate chain presented by Vault when using HTTPS. Mutually exclusive with CABundle. If neither CABundle nor CABundleSecretRef are defined, the certificate bundle in the cert-manager controller container is used to validate the TLS connection. If no key for the Secret is specified, cert-manager will default to 'ca.crt'. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + namespace: + description: 'Name of the vault namespace. Namespaces is a set of features within Vault Enterprise that allows Vault environments to support Secure Multi-tenancy. e.g: "ns1" More about namespaces can be found here https://www.vaultproject.io/docs/enterprise/namespaces' + type: string + path: + description: 'Path is the mount path of the Vault PKI backend''s `sign` endpoint, e.g: "my_pki_mount/sign/my-role-name".' + type: string + server: + description: 'Server is the connection address for the Vault server, e.g: "https://vault.example.com:8200".' 
+ type: string + venafi: + description: Venafi configures this issuer to sign certificates using a Venafi TPP or Venafi Cloud policy zone. + type: object + required: + - zone + properties: + cloud: + description: Cloud specifies the Venafi cloud configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - apiTokenSecretRef + properties: + apiTokenSecretRef: + description: APITokenSecretRef is a secret key selector for the Venafi Cloud API token. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + url: + description: URL is the base URL for Venafi Cloud. Defaults to "https://api.venafi.cloud/v1". + type: string + tpp: + description: TPP specifies Trust Protection Platform configuration settings. Only one of TPP or Cloud may be specified. + type: object + required: + - credentialsRef + - url + properties: + caBundle: + description: Base64-encoded bundle of PEM CAs which will be used to validate the certificate chain presented by the TPP server. Only used if using HTTPS; ignored for HTTP. If undefined, the certificate bundle in the cert-manager controller container is used to validate the chain. + type: string + format: byte + credentialsRef: + description: CredentialsRef is a reference to a Secret containing the username and password for the TPP server. The secret must contain two keys, 'username' and 'password'. + type: object + required: + - name + properties: + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + url: + description: 'URL is the base URL for the vedsdk endpoint of the Venafi TPP instance, for example: "https://tpp.example.com/vedsdk".' + type: string + zone: + description: Zone is the Venafi Policy Zone to use for this issuer. All requests made to the Venafi platform will be restricted by the named zone policy. This field is required. + type: string + status: + description: Status of the Issuer. This is set and managed automatically. + type: object + properties: + acme: + description: ACME specific status options. This field should only be set if the Issuer is configured to use an ACME server to issue certificates. + type: object + properties: + lastRegisteredEmail: + description: LastRegisteredEmail is the email associated with the latest registered ACME account, in order to track changes made to registered account associated with the Issuer + type: string + uri: + description: URI is the unique account identifier, which can also be used to retrieve account details from the CA + type: string + conditions: + description: List of status conditions to indicate the status of a CertificateRequest. Known condition types are `Ready`. + type: array + items: + description: IssuerCondition contains condition information for an Issuer. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. 
+ type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Issuer. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: certificates.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: cert-manager.io + names: + kind: Certificate + listKind: CertificateList + plural: certificates + shortNames: + - cert + - certs + singular: certificate + categories: + - cert-manager + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="Ready")].status + name: Ready + type: string + - jsonPath: .spec.secretName + name: Secret + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.conditions[?(@.type=="Ready")].message + name: Status + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. + name: Age + type: date + schema: + openAPIV3Schema: + description: "A Certificate resource should be created to ensure an up to date and signed x509 certificate is stored in the Kubernetes Secret resource named in `spec.secretName`. \n The stored certificate will be renewed before it expires (as configured by `spec.renewBefore`)." + type: object + required: + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: Desired state of the Certificate resource. + type: object + required: + - issuerRef + - secretName + properties: + additionalOutputFormats: + description: AdditionalOutputFormats defines extra output formats of the private key and signed certificate chain to be written to this Certificate's target Secret. 
This is an Alpha Feature and is only enabled with the `--feature-gates=AdditionalCertificateOutputFormats=true` option on both the controller and webhook components. + type: array + items: + description: CertificateAdditionalOutputFormat defines an additional output format of a Certificate resource. These contain supplementary data formats of the signed certificate chain and paired private key. + type: object + required: + - type + properties: + type: + description: Type is the name of the format type that should be written to the Certificate's target Secret. + type: string + enum: + - DER + - CombinedPEM + commonName: + description: "CommonName is a common name to be used on the Certificate. The CommonName should have a length of 64 characters or fewer to avoid generating invalid CSRs. This value is ignored by TLS clients when any subject alt name is set. This is x509 behaviour: https://tools.ietf.org/html/rfc6125#section-6.4.4" + type: string + dnsNames: + description: DNSNames is a list of DNS subjectAltNames to be set on the Certificate. + type: array + items: + type: string + duration: + description: The requested 'duration' (i.e. lifetime) of the Certificate. This option may be ignored/overridden by some issuer types. If unset this defaults to 90 days. Certificate will be renewed either 2/3 through its duration or `renewBefore` period before its expiry, whichever is later. Minimum accepted duration is 1 hour. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + emailAddresses: + description: EmailAddresses is a list of email subjectAltNames to be set on the Certificate. + type: array + items: + type: string + encodeUsagesInRequest: + description: EncodeUsagesInRequest controls whether key usages should be present in the CertificateRequest + type: boolean + ipAddresses: + description: IPAddresses is a list of IP address subjectAltNames to be set on the Certificate. + type: array + items: + type: string + isCA: + description: IsCA will mark this Certificate as valid for certificate signing. This will automatically add the `cert sign` usage to the list of `usages`. + type: boolean + issuerRef: + description: IssuerRef is a reference to the issuer for this certificate. If the `kind` field is not set, or set to `Issuer`, an Issuer resource with the given name in the same namespace as the Certificate will be used. If the `kind` field is set to `ClusterIssuer`, a ClusterIssuer with the provided name will be used. The `name` field in this stanza is required at all times. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + keystores: + description: Keystores configures additional keystore output formats stored in the `secretName` Secret resource. + type: object + properties: + jks: + description: JKS configures options for storing a JKS keystore in the `spec.secretName` Secret resource. + type: object + required: + - create + - passwordSecretRef + properties: + create: + description: Create enables JKS keystore creation for the Certificate. If true, a file named `keystore.jks` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. 
A file named `truststore.jks` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the JKS keystore. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + pkcs12: + description: PKCS12 configures options for storing a PKCS12 keystore in the `spec.secretName` Secret resource. + type: object + required: + - create + - passwordSecretRef + properties: + create: + description: Create enables PKCS12 keystore creation for the Certificate. If true, a file named `keystore.p12` will be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef`. The keystore file will be updated immediately. A file named `truststore.p12` will also be created in the target Secret resource, encrypted using the password stored in `passwordSecretRef` containing the issuing Certificate Authority + type: boolean + passwordSecretRef: + description: PasswordSecretRef is a reference to a key in a Secret resource containing the password used to encrypt the PKCS12 keystore. + type: object + required: + - name + properties: + key: + description: The key of the entry in the Secret resource's `data` field to be used. Some instances of this field may be defaulted, in others it may be required. + type: string + name: + description: "Name of the resource being referred to. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names" + type: string + literalSubject: + description: LiteralSubject is an LDAP formatted string that represents the [X.509 Subject field](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6). Use this *instead* of the Subject field if you need to ensure the correct ordering of the RDN sequence, such as when issuing certs for LDAP authentication. See https://github.com/cert-manager/cert-manager/issues/3203, https://github.com/cert-manager/cert-manager/issues/4424. This field is alpha level and is only supported by cert-manager installations where LiteralCertificateSubject feature gate is enabled on both cert-manager controller and webhook. + type: string + privateKey: + description: Options to control private keys used for the Certificate. + type: object + properties: + algorithm: + description: Algorithm is the private key algorithm of the corresponding private key for this certificate. If provided, allowed values are either `RSA`,`Ed25519` or `ECDSA` If `algorithm` is specified and `size` is not provided, key size of 256 will be used for `ECDSA` key algorithm and key size of 2048 will be used for `RSA` key algorithm. key size is ignored when using the `Ed25519` key algorithm. + type: string + enum: + - RSA + - ECDSA + - Ed25519 + encoding: + description: The private key cryptography standards (PKCS) encoding for this certificate's private key to be encoded in. If provided, allowed values are `PKCS1` and `PKCS8` standing for PKCS#1 and PKCS#8, respectively. Defaults to `PKCS1` if not specified. 
+ type: string + enum: + - PKCS1 + - PKCS8 + rotationPolicy: + description: RotationPolicy controls how private keys should be regenerated when a re-issuance is being processed. If set to Never, a private key will only be generated if one does not already exist in the target `spec.secretName`. If one does exists but it does not have the correct algorithm or size, a warning will be raised to await user intervention. If set to Always, a private key matching the specified requirements will be generated whenever a re-issuance occurs. Default is 'Never' for backward compatibility. + type: string + enum: + - Never + - Always + size: + description: Size is the key bit size of the corresponding private key for this certificate. If `algorithm` is set to `RSA`, valid values are `2048`, `4096` or `8192`, and will default to `2048` if not specified. If `algorithm` is set to `ECDSA`, valid values are `256`, `384` or `521`, and will default to `256` if not specified. If `algorithm` is set to `Ed25519`, Size is ignored. No other values are allowed. + type: integer + renewBefore: + description: How long before the currently issued certificate's expiry cert-manager should renew the certificate. The default is 2/3 of the issued certificate's duration. Minimum accepted value is 5 minutes. Value must be in units accepted by Go time.ParseDuration https://golang.org/pkg/time/#ParseDuration + type: string + revisionHistoryLimit: + description: revisionHistoryLimit is the maximum number of CertificateRequest revisions that are maintained in the Certificate's history. Each revision represents a single `CertificateRequest` created by this Certificate, either when it was created, renewed, or Spec was changed. Revisions will be removed by oldest first if the number of revisions exceeds this number. If set, revisionHistoryLimit must be a value of `1` or greater. If unset (`nil`), revisions will not be garbage collected. Default value is `nil`. + type: integer + format: int32 + secretName: + description: SecretName is the name of the secret resource that will be automatically created and managed by this Certificate resource. It will be populated with a private key and certificate, signed by the denoted issuer. + type: string + secretTemplate: + description: SecretTemplate defines annotations and labels to be copied to the Certificate's Secret. Labels and annotations on the Secret will be changed as they appear on the SecretTemplate when added or removed. SecretTemplate annotations are added in conjunction with, and cannot overwrite, the base set of annotations cert-manager sets on the Certificate's Secret. + type: object + properties: + annotations: + description: Annotations is a key value map to be copied to the target Kubernetes Secret. + type: object + additionalProperties: + type: string + labels: + description: Labels is a key value map to be copied to the target Kubernetes Secret. + type: object + additionalProperties: + type: string + subject: + description: Full X509 name specification (https://golang.org/pkg/crypto/x509/pkix/#Name). + type: object + properties: + countries: + description: Countries to be used on the Certificate. + type: array + items: + type: string + localities: + description: Cities to be used on the Certificate. + type: array + items: + type: string + organizationalUnits: + description: Organizational Units to be used on the Certificate. + type: array + items: + type: string + organizations: + description: Organizations to be used on the Certificate. 
+ type: array + items: + type: string + postalCodes: + description: Postal codes to be used on the Certificate. + type: array + items: + type: string + provinces: + description: State/Provinces to be used on the Certificate. + type: array + items: + type: string + serialNumber: + description: Serial number to be used on the Certificate. + type: string + streetAddresses: + description: Street addresses to be used on the Certificate. + type: array + items: + type: string + uris: + description: URIs is a list of URI subjectAltNames to be set on the Certificate. + type: array + items: + type: string + usages: + description: Usages is the set of x509 usages that are requested for the certificate. Defaults to `digital signature` and `key encipherment` if not specified. + type: array + items: + description: "KeyUsage specifies valid usage contexts for keys. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 https://tools.ietf.org/html/rfc5280#section-4.2.1.12 \n Valid KeyUsage values are as follows: \"signing\", \"digital signature\", \"content commitment\", \"key encipherment\", \"key agreement\", \"data encipherment\", \"cert sign\", \"crl sign\", \"encipher only\", \"decipher only\", \"any\", \"server auth\", \"client auth\", \"code signing\", \"email protection\", \"s/mime\", \"ipsec end system\", \"ipsec tunnel\", \"ipsec user\", \"timestamping\", \"ocsp signing\", \"microsoft sgc\", \"netscape sgc\"" + type: string + enum: + - signing + - digital signature + - content commitment + - key encipherment + - key agreement + - data encipherment + - cert sign + - crl sign + - encipher only + - decipher only + - any + - server auth + - client auth + - code signing + - email protection + - s/mime + - ipsec end system + - ipsec tunnel + - ipsec user + - timestamping + - ocsp signing + - microsoft sgc + - netscape sgc + status: + description: Status of the Certificate. This is set and managed automatically. + type: object + properties: + conditions: + description: List of status conditions to indicate the status of certificates. Known condition types are `Ready` and `Issuing`. + type: array + items: + description: CertificateCondition contains condition information for an Certificate. + type: object + required: + - status + - type + properties: + lastTransitionTime: + description: LastTransitionTime is the timestamp corresponding to the last status change of this condition. + type: string + format: date-time + message: + description: Message is a human readable description of the details of the last transition, complementing reason. + type: string + observedGeneration: + description: If set, this represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.condition[x].observedGeneration is 9, the condition is out of date with respect to the current state of the Certificate. + type: integer + format: int64 + reason: + description: Reason is a brief machine readable explanation for the condition's last transition. + type: string + status: + description: Status of the condition, one of (`True`, `False`, `Unknown`). + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: Type of the condition, known values are (`Ready`, `Issuing`). + type: string + x-kubernetes-list-map-keys: + - type + x-kubernetes-list-type: map + failedIssuanceAttempts: + description: The number of continuous failed issuance attempts up till now. 
This field gets removed (if set) on a successful issuance and gets set to 1 if unset and an issuance has failed. If an issuance has failed, the delay till the next issuance will be calculated using formula time.Hour * 2 ^ (failedIssuanceAttempts - 1). + type: integer + lastFailureTime: + description: LastFailureTime is the time as recorded by the Certificate controller of the most recent failure to complete a CertificateRequest for this Certificate resource. If set, cert-manager will not re-request another Certificate until 1 hour has elapsed from this time. + type: string + format: date-time + nextPrivateKeySecretName: + description: The name of the Secret resource containing the private key to be used for the next certificate iteration. The keymanager controller will automatically set this field if the `Issuing` condition is set to `True`. It will automatically unset this field when the Issuing condition is not set or False. + type: string + notAfter: + description: The expiration time of the certificate stored in the secret named by this resource in `spec.secretName`. + type: string + format: date-time + notBefore: + description: The time after which the certificate stored in the secret named by this resource in spec.secretName is valid. + type: string + format: date-time + renewalTime: + description: RenewalTime is the time at which the certificate will be next renewed. If not set, no upcoming renewal is scheduled. + type: string + format: date-time + revision: + description: "The current 'revision' of the certificate as issued. \n When a CertificateRequest resource is created, it will have the `cert-manager.io/certificate-revision` set to one greater than the current value of this field. \n Upon issuance, this field will be set to the value of the annotation on the CertificateRequest resource used to issue the certificate. \n Persisting the value on the CertificateRequest resource allows the certificates controller to know whether a request is part of an old issuance or if it is part of the ongoing revision's issuance by checking if the revision value in the annotation is greater than this field." + type: integer + served: true + storage: true +--- +# Source: cert-manager/templates/crds.yaml +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: orders.acme.cert-manager.io + labels: + app: "cert-manager" + app.kubernetes.io/name: "cert-manager" + app.kubernetes.io/instance: "cert-manager" + # Generated labels + app.kubernetes.io/version: "v1.11.0" +spec: + group: acme.cert-manager.io + names: + kind: Order + listKind: OrderList + plural: orders + singular: order + categories: + - cert-manager + - cert-manager-acme + scope: Namespaced + versions: + - name: v1 + subresources: + status: {} + additionalPrinterColumns: + - jsonPath: .status.state + name: State + type: string + - jsonPath: .spec.issuerRef.name + name: Issuer + priority: 1 + type: string + - jsonPath: .status.reason + name: Reason + priority: 1 + type: string + - jsonPath: .metadata.creationTimestamp + description: CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC. 
+ name: Age + type: date + schema: + openAPIV3Schema: + description: Order is a type to represent an Order with an ACME server + type: object + required: + - metadata + - spec + properties: + apiVersion: + description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + type: object + required: + - issuerRef + - request + properties: + commonName: + description: CommonName is the common name as specified on the DER encoded CSR. If specified, this value must also be present in `dnsNames` or `ipAddresses`. This field must match the corresponding field on the DER encoded CSR. + type: string + dnsNames: + description: DNSNames is a list of DNS names that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + type: array + items: + type: string + duration: + description: Duration is the duration for the not after date for the requested certificate. This is set on order creation as per the ACME spec. + type: string + ipAddresses: + description: IPAddresses is a list of IP addresses that should be included as part of the Order validation process. This field must match the corresponding field on the DER encoded CSR. + type: array + items: + type: string + issuerRef: + description: IssuerRef references a properly configured ACME-type Issuer which should be used to create this Order. If the Issuer does not exist, processing will be retried. If the Issuer is not an 'ACME' Issuer, an error will be returned and the Order will be marked as failed. + type: object + required: + - name + properties: + group: + description: Group of the resource being referred to. + type: string + kind: + description: Kind of the resource being referred to. + type: string + name: + description: Name of the resource being referred to. + type: string + request: + description: Certificate signing request bytes in DER encoding. This will be used when finalizing the order. This field must be set on the order. + type: string + format: byte + status: + type: object + properties: + authorizations: + description: Authorizations contains data returned from the ACME server on what authorizations must be completed in order to validate the DNS names specified on the Order. + type: array + items: + description: ACMEAuthorization contains data returned from the ACME server on an authorization that must be completed in order to validate a DNS name on an ACME Order resource. + type: object + required: + - url + properties: + challenges: + description: Challenges specifies the challenge types offered by the ACME server. One of these challenge types will be selected when validating the DNS name and an appropriate Challenge resource will be created to perform the ACME challenge process. + type: array + items: + description: Challenge specifies a challenge offered by the ACME server for an Order.
An appropriate Challenge resource can be created to perform the ACME challenge process. + type: object + required: + - token + - type + - url + properties: + token: + description: Token is the token that must be presented for this challenge. This is used to compute the 'key' that must also be presented. + type: string + type: + description: Type is the type of challenge being offered, e.g. 'http-01', 'dns-01', 'tls-sni-01', etc. This is the raw value retrieved from the ACME server. Only 'http-01' and 'dns-01' are supported by cert-manager, other values will be ignored. + type: string + url: + description: URL is the URL of this challenge. It can be used to retrieve additional metadata about the Challenge from the ACME server. + type: string + identifier: + description: Identifier is the DNS name to be validated as part of this authorization + type: string + initialState: + description: InitialState is the initial state of the ACME authorization when first fetched from the ACME server. If an Authorization is already 'valid', the Order controller will not create a Challenge resource for the authorization. This will occur when working with an ACME server that enables 'authz reuse' (such as Let's Encrypt's production endpoint). If not set and 'identifier' is set, the state is assumed to be pending and a Challenge will be created. + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL is the URL of the Authorization that must be completed + type: string + wildcard: + description: Wildcard will be true if this authorization is for a wildcard DNS name. If this is true, the identifier will be the *non-wildcard* version of the DNS name. For example, if '*.example.com' is the DNS name being validated, this field will be 'true' and the 'identifier' field will be 'example.com'. + type: boolean + certificate: + description: Certificate is a copy of the PEM encoded certificate for this Order. This field will be populated after the order has been successfully finalized with the ACME server, and the order has transitioned to the 'valid' state. + type: string + format: byte + failureTime: + description: FailureTime stores the time that this order failed. This is used to influence garbage collection and back-off. + type: string + format: date-time + finalizeURL: + description: FinalizeURL of the Order. This is used to obtain certificates for this order once it has been completed. + type: string + reason: + description: Reason optionally provides more information about a why the order is in the current state. + type: string + state: + description: State contains the current state of this Order resource. States 'success' and 'expired' are 'final' + type: string + enum: + - valid + - ready + - pending + - processing + - invalid + - expired + - errored + url: + description: URL of the Order. This will initially be empty when the resource is first created. The Order controller will populate this field when the Order is first processed. This field will be immutable after it is initially set. 
+ type: string + served: true + storage: true +--- +# Source: cert-manager/templates/cainjector-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-cainjector + namespace: cert-manager + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +--- +# Source: cert-manager/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +--- +# Source: cert-manager/templates/webhook-serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +automountServiceAccountToken: true +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +--- +# Source: cert-manager/templates/webhook-config.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +data: +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["get", "create", "update", "patch"] + - apiGroups: ["admissionregistration.k8s.io"] + resources: + ["validatingwebhookconfigurations", "mutatingwebhookconfigurations"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiregistration.k8s.io"] + resources: ["apiservices"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["get", "list", "watch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Issuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "issuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ClusterIssuer controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + 
name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "clusterissuers/status"] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Certificates controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: + [ + "certificates", + "certificates/status", + "certificaterequests", + "certificaterequests/status", + ] + verbs: ["update", "patch"] + - apiGroups: ["cert-manager.io"] + resources: + ["certificates", "certificaterequests", "clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["cert-manager.io"] + resources: ["certificates/finalizers", "certificaterequests/finalizers"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders"] + verbs: ["create", "delete", "get", "list", "watch"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch", "create", "update", "delete", "patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Orders controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "orders/status"] + verbs: ["update", "patch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders", "challenges"] + verbs: ["get", "list", "watch"] + - apiGroups: ["cert-manager.io"] + resources: ["clusterissuers", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["create", "delete"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["orders/finalizers"] + verbs: ["update"] + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +# Challenges controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: 
cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + # Use to update challenge resource status + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "challenges/status"] + verbs: ["update", "patch"] + # Used to watch challenge resources + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges"] + verbs: ["get", "list", "watch"] + # Used to watch challenges, issuer and clusterissuer resources + - apiGroups: ["cert-manager.io"] + resources: ["issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + # Need to be able to retrieve ACME account private key to complete challenges + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] + # Used to create events + - apiGroups: [""] + resources: ["events"] + verbs: ["create", "patch"] + # HTTP01 rules + - apiGroups: [""] + resources: ["pods", "services"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["httproutes"] + verbs: ["get", "list", "watch", "create", "delete", "update"] + # We require the ability to specify a custom hostname when we are creating + # new ingress resources. + # See: https://github.com/openshift/origin/blob/21f191775636f9acadb44fa42beeb4f75b255532/pkg/route/apiserver/admission/ingress_admission.go#L84-L148 + - apiGroups: ["route.openshift.io"] + resources: ["routes/custom-host"] + verbs: ["create"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges/finalizers"] + verbs: ["update"] + # DNS01 rules (duplicated above) + - apiGroups: [""] + resources: ["secrets"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +# ingress-shim controller role +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests"] + verbs: ["create", "update", "delete"] + - apiGroups: ["cert-manager.io"] + resources: + ["certificates", "certificaterequests", "issuers", "clusterissuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses"] + verbs: ["get", "list", "watch"] + # We require these rules to support users with the OwnerReferencesPermissionEnforcement + # admission controller enabled: + # https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#ownerreferencespermissionenforcement + - apiGroups: ["networking.k8s.io"] + resources: ["ingresses/finalizers"] + verbs: ["update"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways", "httproutes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["gateway.networking.k8s.io"] + resources: ["gateways/finalizers", "httproutes/finalizers"] + verbs: ["update"] + - apiGroups: 
[""] + resources: ["events"] + verbs: ["create", "patch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-view + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + rbac.authorization.k8s.io/aggregate-to-view: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["get", "list", "watch"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["get", "list", "watch"] +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-edit + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-admin: "true" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["certificates", "certificaterequests", "issuers"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] + - apiGroups: ["cert-manager.io"] + resources: ["certificates/status"] + verbs: ["update"] + - apiGroups: ["acme.cert-manager.io"] + resources: ["challenges", "orders"] + verbs: ["create", "delete", "deletecollection", "patch", "update"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to approve CertificateRequests referencing cert-manager.io Issuers and ClusterIssuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["cert-manager.io"] + resources: ["signers"] + verbs: ["approve"] + resourceNames: + ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] +--- +# Source: cert-manager/templates/rbac.yaml +# Permission to: +# - Update and sign CertificateSigningRequests referencing cert-manager.io Issuers and ClusterIssuers +# - Perform SubjectAccessReviews to test whether users are able to reference Namespaced Issuers +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["certificates.k8s.io"] + resources: ["certificatesigningrequests/status"] + verbs: ["update", "patch"] + - apiGroups: ["certificates.k8s.io"] + resources: ["signers"] + resourceNames: + ["issuers.cert-manager.io/*", "clusterissuers.cert-manager.io/*"] + verbs: ["sign"] + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind:
ClusterRole +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["authorization.k8s.io"] + resources: ["subjectaccessreviews"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-cainjector + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-cainjector +subjects: + - name: cert-manager-cainjector + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-issuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-issuers +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-clusterissuers + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-clusterissuers +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificates + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificates +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-orders + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-orders +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-challenges + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: 
rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-challenges +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-ingress-shim + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-ingress-shim +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-approve:cert-manager-io + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-approve:cert-manager-io +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-controller-certificatesigningrequests + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cert-manager" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-controller-certificatesigningrequests +subjects: + - name: cert-manager + namespace: cert-manager + kind: ServiceAccount +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cert-manager-webhook:subjectaccessreviews + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cert-manager-webhook:subjectaccessreviews +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# leader election rules +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +rules: + # Used for leader election by the controller + # cert-manager-cainjector-leader-election is used by the CertificateBased injector controller + # see cmd/cainjector/start.go#L113 + # cert-manager-cainjector-leader-election-core is used by the SecretBased injector controller + # see cmd/cainjector/start.go#L137 + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: + [ + "cert-manager-cainjector-leader-election", + "cert-manager-cainjector-leader-election-core", + ] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# 
Source: cert-manager/templates/rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + resourceNames: ["cert-manager-controller"] + verbs: ["get", "update", "patch"] + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create"] +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +rules: + - apiGroups: [""] + resources: ["secrets"] + resourceNames: + - "cert-manager-webhook-ca" + verbs: ["get", "list", "watch", "update"] + # It's not possible to grant CREATE permission on a single resourceName. + - apiGroups: [""] + resources: ["secrets"] + verbs: ["create"] +--- +# Source: cert-manager/templates/cainjector-rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-cainjector:leaderelection + namespace: kube-system + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-cainjector:leaderelection +subjects: + - kind: ServiceAccount + name: cert-manager-cainjector + namespace: cert-manager +--- +# Source: cert-manager/templates/rbac.yaml +# grant cert-manager permission to manage the leaderelection configmap in the +# leader election namespace +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager:leaderelection + namespace: kube-system + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager:leaderelection +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager + namespace: cert-manager +--- +# Source: cert-manager/templates/webhook-rbac.yaml +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: cert-manager-webhook:dynamic-serving + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: cert-manager-webhook:dynamic-serving +subjects: + - apiGroup: "" + kind: ServiceAccount + name: cert-manager-webhook + namespace: cert-manager +--- +# Source: cert-manager/templates/service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + 
app.kubernetes.io/version: "v1.11.0" +spec: + type: ClusterIP + ports: + - protocol: TCP + port: 9402 + name: tcp-prometheus-servicemonitor + targetPort: 9402 + selector: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" +--- +# Source: cert-manager/templates/webhook-service.yaml +apiVersion: v1 +kind: Service +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +spec: + type: ClusterIP + ports: + - name: https + port: 443 + protocol: TCP + targetPort: "https" + selector: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" +--- +# Source: cert-manager/templates/cainjector-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-cainjector + namespace: cert-manager + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + template: + metadata: + labels: + app: cainjector + app.kubernetes.io/name: cainjector + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "cainjector" + app.kubernetes.io/version: "v1.11.0" + spec: + serviceAccountName: cert-manager-cainjector + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-cainjector + image: "quay.io/jetstack/cert-manager-cainjector:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --leader-election-namespace=kube-system + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager + namespace: cert-manager + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + template: + metadata: + labels: + app: cert-manager + app.kubernetes.io/name: cert-manager + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "controller" + app.kubernetes.io/version: "v1.11.0" + annotations: + prometheus.io/path: "/metrics" + prometheus.io/scrape: "true" + prometheus.io/port: "9402" + spec: + serviceAccountName: cert-manager + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-controller + image: "quay.io/jetstack/cert-manager-controller:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --cluster-resource-namespace=$(POD_NAMESPACE) + - --leader-election-namespace=kube-system + - --acme-http01-solver-image=quay.io/jetstack/cert-manager-acmesolver:v1.11.0 + - --max-concurrent-challenges=60 + ports: + - containerPort: 9402 + name: 
http-metrics + protocol: TCP + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/webhook-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: cert-manager-webhook + namespace: cert-manager + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + template: + metadata: + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + spec: + serviceAccountName: cert-manager-webhook + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + containers: + - name: cert-manager-webhook + image: "quay.io/jetstack/cert-manager-webhook:v1.11.0" + imagePullPolicy: IfNotPresent + args: + - --v=2 + - --secure-port=10250 + - --dynamic-serving-ca-secret-namespace=$(POD_NAMESPACE) + - --dynamic-serving-ca-secret-name=cert-manager-webhook-ca + - --dynamic-serving-dns-names=cert-manager-webhook + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE) + - --dynamic-serving-dns-names=cert-manager-webhook.$(POD_NAMESPACE).svc + + ports: + - name: https + protocol: TCP + containerPort: 10250 + - name: healthcheck + protocol: TCP + containerPort: 6080 + livenessProbe: + httpGet: + path: /livez + port: 6080 + scheme: HTTP + initialDelaySeconds: 60 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + readinessProbe: + httpGet: + path: /healthz + port: 6080 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + nodeSelector: + kubernetes.io/os: linux +--- +# Source: cert-manager/templates/webhook-mutating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). 
+ matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + # Only include 'sideEffects' field in Kubernetes 1.12+ + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /mutate +--- +# Source: cert-manager/templates/webhook-validating-webhook.yaml +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: cert-manager-webhook + labels: + app: webhook + app.kubernetes.io/name: webhook + app.kubernetes.io/instance: cert-manager + app.kubernetes.io/component: "webhook" + app.kubernetes.io/version: "v1.11.0" + annotations: + cert-manager.io/inject-ca-from-secret: "cert-manager/cert-manager-webhook-ca" +webhooks: + - name: webhook.cert-manager.io + namespaceSelector: + matchExpressions: + - key: "cert-manager.io/disable-validation" + operator: "NotIn" + values: + - "true" + - key: "name" + operator: "NotIn" + values: + - cert-manager + rules: + - apiGroups: + - "cert-manager.io" + - "acme.cert-manager.io" + apiVersions: + - "v1" + operations: + - CREATE + - UPDATE + resources: + - "*/*" + admissionReviewVersions: ["v1"] + # This webhook only accepts v1 cert-manager resources. + # Equivalent matchPolicy ensures that non-v1 resource requests are sent to + # this webhook (after the resources have been converted to v1). + matchPolicy: Equivalent + timeoutSeconds: 10 + failurePolicy: Fail + sideEffects: None + clientConfig: + service: + name: cert-manager-webhook + namespace: cert-manager + path: /validate diff --git a/infra/base/dashboard-v2.4.0.yaml b/infra/base/dashboard-v2.4.0.yaml new file mode 100644 index 00000000..6d4a0bd3 --- /dev/null +++ b/infra/base/dashboard-v2.4.0.yaml @@ -0,0 +1,305 @@ +# https://raw.githubusercontent.com/kubernetes/dashboard/v2.4.0/aio/deploy/recommended.yaml + +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
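+
+# Access note: the kubernetes-dashboard Service below is ClusterIP-only and this
+# manifest is currently disabled in infra/base/kustomization.yaml, so when it is
+# enabled the dashboard is normally reached through the API server proxy, e.g.:
+#   kubectl proxy
+#   # then browse to
+#   # http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/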
+ +apiVersion: v1 +kind: Namespace +metadata: + name: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + ports: + - port: 443 + targetPort: 8443 + selector: + k8s-app: kubernetes-dashboard + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-certs + namespace: kubernetes-dashboard +type: Opaque + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-csrf + namespace: kubernetes-dashboard +type: Opaque +data: + csrf: "" + +--- + +apiVersion: v1 +kind: Secret +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-key-holder + namespace: kubernetes-dashboard +type: Opaque + +--- + +kind: ConfigMap +apiVersion: v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard-settings + namespace: kubernetes-dashboard + +--- + +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +rules: + # Allow Dashboard to get, update and delete Dashboard exclusive secrets. + - apiGroups: [""] + resources: ["secrets"] + resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"] + verbs: ["get", "update", "delete"] + # Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map. + - apiGroups: [""] + resources: ["configmaps"] + resourceNames: ["kubernetes-dashboard-settings"] + verbs: ["get", "update"] + # Allow Dashboard to get metrics. 
+ - apiGroups: [""] + resources: ["services"] + resourceNames: ["heapster", "dashboard-metrics-scraper"] + verbs: ["proxy"] + - apiGroups: [""] + resources: ["services/proxy"] + resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"] + verbs: ["get"] + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard +rules: + # Allow Metrics Scraper to get metrics from the Metrics server + - apiGroups: ["metrics.k8s.io"] + resources: ["pods", "nodes"] + verbs: ["get", "list", "watch"] + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: kubernetes-dashboard +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: kubernetes-dashboard +subjects: + - kind: ServiceAccount + name: kubernetes-dashboard + namespace: kubernetes-dashboard + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: kubernetes-dashboard + name: kubernetes-dashboard + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: kubernetes-dashboard + template: + metadata: + labels: + k8s-app: kubernetes-dashboard + spec: + containers: + - name: kubernetes-dashboard + image: kubernetesui/dashboard:v2.4.0 + imagePullPolicy: Always + ports: + - containerPort: 8443 + protocol: TCP + args: + - --auto-generate-certificates + - --namespace=kubernetes-dashboard + # Uncomment the following line to manually specify Kubernetes API server Host + # If not specified, Dashboard will attempt to auto discover the API server and connect + # to it. Uncomment only if the default does not work. 
+ # - --apiserver-host=http://my-address:port + volumeMounts: + - name: kubernetes-dashboard-certs + mountPath: /certs + # Create on-disk volume to store exec logs + - mountPath: /tmp + name: tmp-volume + livenessProbe: + httpGet: + scheme: HTTPS + path: / + port: 8443 + initialDelaySeconds: 30 + timeoutSeconds: 30 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + volumes: + - name: kubernetes-dashboard-certs + secret: + secretName: kubernetes-dashboard-certs + - name: tmp-volume + emptyDir: {} + serviceAccountName: kubernetes-dashboard + nodeSelector: + "kubernetes.io/os": linux + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + +--- + +kind: Service +apiVersion: v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + ports: + - port: 8000 + targetPort: 8000 + selector: + k8s-app: dashboard-metrics-scraper + +--- + +kind: Deployment +apiVersion: apps/v1 +metadata: + labels: + k8s-app: dashboard-metrics-scraper + name: dashboard-metrics-scraper + namespace: kubernetes-dashboard +spec: + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + k8s-app: dashboard-metrics-scraper + template: + metadata: + labels: + k8s-app: dashboard-metrics-scraper + spec: + securityContext: + seccompProfile: + type: RuntimeDefault + containers: + - name: dashboard-metrics-scraper + image: kubernetesui/metrics-scraper:v1.0.7 + ports: + - containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + scheme: HTTP + path: / + port: 8000 + initialDelaySeconds: 30 + timeoutSeconds: 30 + volumeMounts: + - mountPath: /tmp + name: tmp-volume + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsUser: 1001 + runAsGroup: 2001 + serviceAccountName: kubernetes-dashboard + nodeSelector: + "kubernetes.io/os": linux + # Comment the following tolerations if Dashboard must not be deployed on master + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule + volumes: + - name: tmp-volume + emptyDir: {} diff --git a/infra/base/ingress-nginx-patch.yaml b/infra/base/ingress-nginx-patch.yaml new file mode 100644 index 00000000..825e24ad --- /dev/null +++ b/infra/base/ingress-nginx-patch.yaml @@ -0,0 +1,30 @@ +# Redirect http to https +apiVersion: v1 +kind: ConfigMap +metadata: + name: ingress-nginx-controller + namespace: ingress-nginx +data: + ssl-redirect: "true" +--- +# Configure TLS certificate to use with --default-ssl-certificate +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + template: + spec: + containers: + - name: controller + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + - --default-ssl-certificate=cert-manager/tls-secret diff --git a/infra/base/ingress-nginx-v1.6.4.yaml b/infra/base/ingress-nginx-v1.6.4.yaml new file mode 100644 index 00000000..2e8eda2a --- /dev/null +++ b/infra/base/ingress-nginx-v1.6.4.yaml @@ -0,0 +1,650 @@ +# 
https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.6.4/deploy/static/provider/cloud/deploy.yaml + +# TODO: Compare file changes with original version since there were fixes done +# - 1) added get and update verbs on coordination.k8s.io + +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + name: ingress-nginx +--- +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resourceNames: + - ingress-nginx-leader + resources: + - leases + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission + namespace: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io 
+ resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: v1 +data: + allow-snippet-annotations: "true" +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller + namespace: ingress-nginx +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + 
app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + externalTrafficPolicy: Local + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - appProtocol: http + name: http + port: 80 + protocol: TCP + targetPort: http + - appProtocol: https + name: https + port: 443 + protocol: TCP + targetPort: https + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + type: LoadBalancer +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller-admission + namespace: ingress-nginx +spec: + ports: + - appProtocol: https + name: https-webhook + port: 443 + targetPort: webhook + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + minReadySeconds: 0 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + template: + metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + spec: + containers: + - args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller + - --election-id=ingress-nginx-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + image: registry.k8s.io/ingress-nginx/controller:v1.6.4@sha256:15be4666c53052484dd2992efacf2f50ea77a78ae8aa21ccd91af6baaa7ea22f + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: controller + ports: + - containerPort: 80 + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + - containerPort: 8443 + name: webhook + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 100m + memory: 90Mi + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 + volumeMounts: + - mountPath: /usr/local/certificates/ + name: webhook-cert + readOnly: true + dnsPolicy: ClusterFirst + 
nodeSelector: + kubernetes.io/os: linux + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: ingress-nginx-admission +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-create + namespace: ingress-nginx +spec: + template: + metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-create + spec: + containers: + - args: + - create + - --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f + imagePullPolicy: IfNotPresent + name: create + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-patch + namespace: ingress-nginx +spec: + template: + metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-patch + spec: + containers: + - args: + - patch + - --webhook-name=ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f + imagePullPolicy: IfNotPresent + name: patch + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + 
app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission +webhooks: + - admissionReviewVersions: + - v1 + clientConfig: + service: + name: ingress-nginx-controller-admission + namespace: ingress-nginx + path: /networking/v1/ingresses + failurePolicy: Fail + matchPolicy: Equivalent + name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + sideEffects: None diff --git a/infra/base/kustomization.yaml b/infra/base/kustomization.yaml new file mode 100644 index 00000000..2f4d701d --- /dev/null +++ b/infra/base/kustomization.yaml @@ -0,0 +1,21 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - metrics-server-v0.6.2.yaml + - cert-manager-v1.11.0.yaml + - cert-manager-resources.yaml + - ingress-nginx-v1.6.4.yaml + # - dashboard-v2.4.0.yaml + +patchesStrategicMerge: + - ingress-nginx-patch.yaml + +secretGenerator: + - name: cloudflare-api-token + namespace: cert-manager + type: Opaque + envs: + - cloudflare-api-token.env + options: + disableNameSuffixHash: true diff --git a/infra/base/metrics-server-v0.6.2.yaml b/infra/base/metrics-server-v0.6.2.yaml new file mode 100644 index 00000000..5913c623 --- /dev/null +++ b/infra/base/metrics-server-v0.6.2.yaml @@ -0,0 +1,197 @@ +# https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.2/components.yaml + +# TODO: Compare file changes with original version since there were fixes done +# - 1) removed resource requests + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader +rules: + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +rules: + - apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get + - apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service 
+metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: + containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + image: registry.k8s.io/metrics-server/metrics-server:v0.6.2 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + - containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: tmp-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + volumes: + - emptyDir: {} + name: tmp-dir +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + labels: + k8s-app: metrics-server + name: v1beta1.metrics.k8s.io +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 diff --git a/infra/datacoves-install.yml b/infra/datacoves-install.yml new file mode 100644 index 00000000..37988b48 --- /dev/null +++ b/infra/datacoves-install.yml @@ -0,0 +1,109 @@ +- name: Install Datacoves + hosts: localhost + connection: local + tasks: + - name: Determine Working Directory + shell: "cd .. && pwd" + register: working_dir + + - name: Create Virtual Environment + pip: + virtualenv: "{{ working_dir['stdout'] }}/.venv" + virtualenv_command: "python3 -m venv" + requirements: "{{ working_dir['stdout'] }}/requirements.txt" + + # Reveal secrets is optional because it is easier to develop locally without + # re-doing this; just let the secrets reveal once. 
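+    # To skip this step entirely, export DC_SKIP_REVEAL_SECRETS=1 before running
+    # the playbook; the `when:` condition below checks that variable.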
+ - name: Reveal Secrets + when: lookup('env', 'DC_SKIP_REVEAL_SECRETS') != '1' + shell: + cmd: | + export OP_SERVICE_ACCOUNT_TOKEN="{{ lookup('env', 'OP_SERVICE_ACCOUNT_TOKEN') }}" + source {{ working_dir['stdout'] }}/.venv/bin/activate + ./cli.py reveal_secrets -y + chdir: "{{ working_dir['stdout'] }}" + + - name: Create configuration path + file: + path: "{{ working_dir['stdout'] }}/config/{{ hostname }}" + state: directory + + - name: "Create configuration path: base" + file: + path: "{{ working_dir['stdout'] }}/config/{{ hostname }}/base" + state: directory + + - name: "Create configuration path: secrets" + file: + path: "{{ working_dir['stdout'] }}/config/{{ hostname }}/secrets" + state: directory + + - name: Create cluster-params.yaml + template: + src: cluster-params.j2 + dest: "{{ working_dir['stdout'] }}/config/{{ hostname }}/cluster-params.yaml" + + - name: Read datacoveslocal cluster-params.secrets + ansible.builtin.shell: "cat {{ working_dir['stdout'] }}/config/datacoveslocal.com/cluster-params.secret.yaml" + register: results + + - name: Grab Secrets from datacoveslocal + set_fact: + cluster_secrets: "{{ results.stdout | from_yaml}}" + + - name: Read datacoveslocal core-api secrets + ansible.builtin.shell: "env -i bash --noprofile --norc -c 'set -a; source {{ working_dir['stdout'] }}/config/datacoveslocal.com/secrets/core-api.env; env'" + register: results + changed_when: false + + - name: Grab Secrets from datacoveslocal + ansible.builtin.set_fact: + coreapi_secrets: "{{ ('{' + results.stdout_lines | map('regex_replace', '([^=]*)=(.*)', '\"\\1\": \"\\2\"') | join(',') + '}') | from_json }}" + + - name: Read datacoveslocal core-dbt-api secrets + ansible.builtin.shell: "env -i bash --noprofile --norc -c 'set -a; source {{ working_dir['stdout'] }}/config/datacoveslocal.com/secrets/core-dbt-api.env; env'" + register: results + changed_when: false + + - name: Grab Secrets from datacoveslocal + ansible.builtin.set_fact: + coredbtapi_secrets: "{{ ('{' + results.stdout_lines | map('regex_replace', '([^=]*)=(.*)', '\"\\1\": \"\\2\"') | join(',') + '}') | from_json }}" + + - name: Create cluster-params.secret.yaml + template: + src: cluster-params.secret.j2 + dest: "{{ working_dir['stdout'] }}/config/{{ hostname }}/cluster-params.secret.yaml" + + - name: Create core-api.env + template: + src: core-api.j2 + dest: "{{ working_dir['stdout'] }}/config/{{ hostname }}/secrets/core-api.env" + + - name: Create core-dbt-api.env + template: + src: core-dbt-api.j2 + dest: "{{ working_dir['stdout'] }}/config/{{ hostname }}/secrets/core-dbt-api.env" + + - name: Copy docker config + ansible.builtin.copy: + src: "{{ working_dir['stdout'] }}/config/datacoveslocal.com/secrets/docker-config.secret.json" + dest: "{{ working_dir['stdout'] }}/config/{{ hostname }}/secrets/" + + - name: Copy base directory + ansible.builtin.copy: + src: "{{ working_dir['stdout'] }}/infra/base" + dest: "{{ working_dir['stdout'] }}/config/{{ hostname }}/" + + - name: Create cert-manager-resources.yaml + ansible.builtin.template: + src: cert-manager-resources.j2 + dest: "{{ working_dir['stdout'] }}/config/{{ hostname }}/base/cert-manager-resources.yaml" + +# - name: Set release and install +# ansible.builtin.shell: +# cmd: | +# export KUBECONFIG=$HOME/.kube/config:{{ working_dir['stdout'] }}/{{ aks_name}}.kubeconfig +# source {{ working_dir['stdout'] }}/.venv/bin/activate +# ./cli.py set_latest_release {{ hostname }} && ./cli.py setup_base {{ hostname }} && ./cli.py install {{ hostname }} COMPLY +# chdir: "{{ 
working_dir['stdout'] }}" + diff --git a/infra/kubenote.md b/infra/kubenote.md new file mode 100644 index 00000000..f56d8e63 --- /dev/null +++ b/infra/kubenote.md @@ -0,0 +1,16 @@ + - name: Create cluster-params.secret.yaml + template: + src: cluster-params.secret.j2 + dest: "../config/{{ hostname }}/cluster-params.secret.yml" + backup: true + + + hostname: "{{ lookup('env', 'DC_HOSTNAME') }}" + release: "{{ lookup('env', 'DC_RELEASE') }}" + sentry_dsn_operator: "{{ lookup('env', 'DC_SENTRY_DSN_OPERATOR') }}" + sentry_dsn: "{{ lookup('env', 'DC_SENTRY_DSN') }}" + slack_token: "{{ lookup('env', 'DC_SLACK_TOKEN') }}" + + +# +# export KUBECONFIG=/path/to/first/config:/path/to/second/config" \ No newline at end of file diff --git a/infra/templates/cert-manager-resources.j2 b/infra/templates/cert-manager-resources.j2 new file mode 100644 index 00000000..ece3fc66 --- /dev/null +++ b/infra/templates/cert-manager-resources.j2 @@ -0,0 +1,31 @@ +apiVersion: cert-manager.io/v1 +kind: ClusterIssuer +metadata: + name: letsencrypt +spec: + acme: + email: hey@datacoves.com + server: https://acme-v02.api.letsencrypt.org/directory + privateKeySecretRef: + name: letsencrypt-issuer-key + solvers: + - dns01: + cloudflare: + apiTokenSecretRef: + name: cloudflare-api-token + key: CLOUDFLARE_API_TOKEN +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: datacoves-cluster-root-cert + namespace: cert-manager +spec: + secretName: tls-secret + issuerRef: + group: cert-manager.io + kind: ClusterIssuer + name: letsencrypt + dnsNames: + - "*.{{ hostname }}" + - "{{ hostname }}" diff --git a/infra/templates/cluster-params.j2 b/infra/templates/cluster-params.j2 new file mode 100644 index 00000000..b715b11c --- /dev/null +++ b/infra/templates/cluster-params.j2 @@ -0,0 +1,93 @@ +domain: {{ hostname }} +context: {{ kubectl_context }} +provider: aks +kubernetes_version: "{{ kubernetes_version }}" + +## Docker image defaults +release: "{{ release }}" + +# docker_registry: +# docker_secret_name: +# extra_images: + +## cert-manager +cert_manager_issuer: letsencrypt + +## internal-dns to allow traffic to it +# Commented out bec it was being resolved incorrectly +# internal_dns_url: datacoveskube-dns-j3b0n2u7.hcp.centralus.azmk8s.io + +## External services +airbyte_config: + db: + external: true + backend: postgres + tls: true + tls_enabled: true + host_verification: false + tls_disable_host_verification: true + logs: + external: true + backend: afs + +datahub_config: + db: + external: true + backend: postgres + +airflow_db_external: true +airflow_logs_external: true +superset_db_external: true +core_liveness_readiness: false + +# network policy configuration +internal_db_cluster_ip_range: 10.225.0.0/24 + +## Project +projects: + balboa-analytics: + name: Balboa Analytics + slug: balboa-analytics-datacoves + clone_strategy: http_clone + repository: + url: https://github.com/datacoves/balboa.git + git_url: git@github.com:datacoves/balboa.git + +## Account +account: + name: Datacoves Test + slug: datacoves-test + owner: + email: gomezn@convexa.ai + name: Noel Gomez + +## Features +features_enabled: + admin_groups: true + admin_projects: true + admin_connections: true + admin_environments: true + admin_integrations: true + admin_secrets: true + admin_service_credentials: true + local_airflow: true + user_profile_change_name: true + user_profile_change_credentials: true + user_profile_change_ssh_keys: true + user_profile_change_ssl_keys: true + stop_codeserver_on_inactivity: true + codeserver_restart: true + 
observability_stack: true + show_get_started_banner: true + +airflow_config: + db: + external: true + logs: + backend: afs + external: true + +min_replicas_worker_main: 1 +max_replicas_worker_main: 2 +min_replicas_api: 1 +max_replicas_api: 2 diff --git a/infra/templates/cluster-params.secret.j2 b/infra/templates/cluster-params.secret.j2 new file mode 100644 index 00000000..c10b0804 --- /dev/null +++ b/infra/templates/cluster-params.secret.j2 @@ -0,0 +1,28 @@ +postgres_db_provisioner: + host: {{ database_host }} + db: datacoves + user: datacoves_admin + pass: "{{ database_password }}" + +# My research indicates this DSN isn't really very secret and there isn't +# a serious risk in putting this in our GIT repo. It makes our configuration +# much simpler to have it here, so I would rather just have it here. +# - sconley +operator_sentry_dsn: "https://b4d54fe4d14746729baa351a2d3bf4f9@o1145668.ingest.sentry.io/4504730556170240" + +grafana: + postgres_password: "{{ cluster_secrets['grafana']['postgres_password'] }}" + admin_password: "{{ cluster_secrets['grafana']['admin_password'] }}" + loki: + provider: minio + password: "{{ cluster_secrets['grafana']['loki']['password'] }}" + +core_minio_config: + enabled: true + bucket: "dbt-api" + username: "admin" + password: "{{ cluster_secrets['core_minio_config']['password'] }}" + +core_db_service_account_read_only: + username: datacoves_ro + password: ThisIsJustATest diff --git a/infra/templates/core-api.j2 b/infra/templates/core-api.j2 new file mode 100644 index 00000000..2af12e73 --- /dev/null +++ b/infra/templates/core-api.j2 @@ -0,0 +1,43 @@ +DEBUG=True +USER_AND_PASS_AUTH=True + +# Hard coding these for the test server +SECRET_KEY='^dc$s=8@)tutjb&4cde3)ny+@)y_8pc-&3&!3fdq=b$!$-(1si' +FERNET_KEY=R1wy0eHlc1VSV25OZGfXDHmwD3uGlEZUIvFdhgwYrDI= + +ALLOWED_HOSTS=.{{ hostname }} + +DB_HOST={{ database_host }} +DB_NAME=datacoves +DB_USER=datacoves_admin +DB_PASS={{ database_password }} + +IDENTITY_PROVIDER=auth0 +AUTH0_DOMAIN={{ coreapi_secrets['AUTH0_DOMAIN'] }} +AUTH0_CLIENT_ID={{ coreapi_secrets['AUTH0_CLIENT_ID'] }} +AUTH0_CLIENT_SECRET={{ coreapi_secrets['AUTH0_CLIENT_SECRET'] }} +IDP_SERVICE_ACCOUNT={{ coreapi_secrets['IDP_SERVICE_ACCOUNT'] }} + +# base64 config/{domain}/secrets/docker-config.secret.json +DEFAULT_DOCKER_CONFIG={{ coreapi_secrets['DEFAULT_DOCKER_CONFIG'] }} + +# Stripe would go here if we wanted to enable it. +#STRIPE_WEBHOOK_SECRET= +#STRIPE_API_KEY= +#STRIPE_CUSTOMER_PORTAL= + +CELERY_BROKER_URL=redis://redis-master:6379/1 +FLOWER_USERNAME=flower +FLOWER_PASSWORD=flower + +REDIS_URI=redis://redis-master:6379/1 + +# This is optional (some customers may not like external logs reporting) +SENTRY_DSN=https://5d7d4b6b765d41a295ba80e70d685cf2@o1145668.ingest.sentry.io/6213267 + +# This is pretty problematic and I don't think we need it. 
I can fix it if +# it is necessary, or just generate it with "openssl genrsa -out oidc.key 4096" +#OAUTH_OIDC_RSA_KEY= + +# Slack token +#SLACK_BOT_TOKEN= diff --git a/infra/templates/core-dbt-api.j2 b/infra/templates/core-dbt-api.j2 new file mode 100644 index 00000000..e3eba292 --- /dev/null +++ b/infra/templates/core-dbt-api.j2 @@ -0,0 +1,24 @@ +DB_SSL_ENABLED=true +DB_USER=datacoves_admin +DB_PASS={{ database_password }} +DB_HOST={{ database_host }} +DB_NAME=dbt_api + +DATACOVES_DB_USER=datacoves_ro +DATACOVES_DB_PASS=ThisIsJustATest +DATACOVES_DB_HOST={{ database_host }} +DATACOVES_DB_NAME=datacoves + +CONNECT_TO_AIRFLOW=true +S3_BUCKET_NAME=dbt-api +MINIO_URL=http://minio.core.svc.cluster.local:9000 +MINIO_ACCESS_KEY=admin +MINIO_SECRET_KEY={{ cluster_secrets['core_minio_config']['password'] }} + + +SECRET_KEY_BASE={{ coredbtapi_secrets['SECRET_KEY_BASE'] }} +INTERNAL_BEARER_TOKEN=dev_internal_bearer_token +IEX_COOKIE={{ coredbtapi_secrets['IEX_COOKIE'] }} + +# Hard coding for test - must match core-api.j2 +FERNET_KEY=R1wy0eHlc1VSV25OZGfXDHmwD3uGlEZUIvFdhgwYrDI= diff --git a/infra/templates/ingress-nginx-patch.yaml b/infra/templates/ingress-nginx-patch.yaml new file mode 100644 index 00000000..f65530ae --- /dev/null +++ b/infra/templates/ingress-nginx-patch.yaml @@ -0,0 +1,39 @@ +# Redirect http to https +apiVersion: v1 +kind: ConfigMap +metadata: + name: ingress-nginx-controller + namespace: ingress-nginx +data: + ssl-redirect: "true" +--- +# Configure internal load balancer +apiVersion: v1 +kind: Service +metadata: + annotations: + service.beta.kubernetes.io/azure-load-balancer-internal: "true" + name: ingress-nginx-controller + namespace: ingress-nginx +--- +# Configure TLS certificate to use with --default-ssl-certificate +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + template: + spec: + containers: + - name: controller + args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller + - --election-id=ingress-controller-leader + - --controller-class=k8s.io/ingress-nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + - --default-ssl-certificate=ingress-nginx/default-tls diff --git a/infra/templates/ingress-nginx-v1.6.4.yaml b/infra/templates/ingress-nginx-v1.6.4.yaml new file mode 100644 index 00000000..2e8eda2a --- /dev/null +++ b/infra/templates/ingress-nginx-v1.6.4.yaml @@ -0,0 +1,650 @@ +# https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.6.4/deploy/static/provider/cloud/deploy.yaml + +# TODO: Compare file changes with original version since there were fixes done +# - 1) added get and update verbs on coordination.k8s.io + +apiVersion: v1 +kind: Namespace +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + name: ingress-nginx +--- +apiVersion: v1 +automountServiceAccountToken: true +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + 
app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx + namespace: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - namespaces + verbs: + - get + - apiGroups: + - "" + resources: + - configmaps + - pods + - secrets + - endpoints + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - coordination.k8s.io + resourceNames: + - ingress-nginx-leader + resources: + - leases + verbs: + - get + - update + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - create + - get + - update + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission + namespace: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx +rules: + - apiGroups: + - "" + resources: + - configmaps + - endpoints + - nodes + - pods + - secrets + - namespaces + verbs: + - list + - watch + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - list + - watch + - apiGroups: + - "" + resources: + - nodes + verbs: + - get + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + - apiGroups: + - networking.k8s.io + resources: + - ingresses/status + verbs: + - update + - apiGroups: + - networking.k8s.io + resources: + - ingressclasses + verbs: + - get + - list + - watch + - apiGroups: + - discovery.k8s.io + resources: + - endpointslices + verbs: + - list + - watch + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - validatingwebhookconfigurations + verbs: + - get + - update +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission + namespace: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx +subjects: + - kind: ServiceAccount + name: ingress-nginx + namespace: ingress-nginx +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: ingress-nginx-admission +subjects: + - kind: ServiceAccount + name: ingress-nginx-admission + namespace: ingress-nginx +--- +apiVersion: v1 +data: + allow-snippet-annotations: "true" +kind: ConfigMap +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller + namespace: ingress-nginx +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + externalTrafficPolicy: Local + ipFamilies: + - IPv4 + ipFamilyPolicy: SingleStack + ports: + - appProtocol: http + name: http + port: 80 + protocol: TCP + targetPort: http + - appProtocol: https + name: https + port: 443 + protocol: TCP + targetPort: https + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + type: LoadBalancer +--- +apiVersion: v1 +kind: Service +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller-admission + namespace: ingress-nginx +spec: + ports: + - appProtocol: https + name: https-webhook + port: 443 + targetPort: 
webhook + selector: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + type: ClusterIP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-controller + namespace: ingress-nginx +spec: + minReadySeconds: 0 + revisionHistoryLimit: 10 + selector: + matchLabels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + template: + metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + spec: + containers: + - args: + - /nginx-ingress-controller + - --publish-service=$(POD_NAMESPACE)/ingress-nginx-controller + - --election-id=ingress-nginx-leader + - --controller-class=k8s.io/ingress-nginx + - --ingress-class=nginx + - --configmap=$(POD_NAMESPACE)/ingress-nginx-controller + - --validating-webhook=:8443 + - --validating-webhook-certificate=/usr/local/certificates/cert + - --validating-webhook-key=/usr/local/certificates/key + env: + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: LD_PRELOAD + value: /usr/local/lib/libmimalloc.so + image: registry.k8s.io/ingress-nginx/controller:v1.6.4@sha256:15be4666c53052484dd2992efacf2f50ea77a78ae8aa21ccd91af6baaa7ea22f + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /wait-shutdown + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: controller + ports: + - containerPort: 80 + name: http + protocol: TCP + - containerPort: 443 + name: https + protocol: TCP + - containerPort: 8443 + name: webhook + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + resources: + requests: + cpu: 100m + memory: 90Mi + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - NET_BIND_SERVICE + drop: + - ALL + runAsUser: 101 + volumeMounts: + - mountPath: /usr/local/certificates/ + name: webhook-cert + readOnly: true + dnsPolicy: ClusterFirst + nodeSelector: + kubernetes.io/os: linux + serviceAccountName: ingress-nginx + terminationGracePeriodSeconds: 300 + volumes: + - name: webhook-cert + secret: + secretName: ingress-nginx-admission +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-create + namespace: ingress-nginx +spec: + template: + metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-create + spec: + containers: + - args: + - create + - 
--host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc + - --namespace=$(POD_NAMESPACE) + - --secret-name=ingress-nginx-admission + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f + imagePullPolicy: IfNotPresent + name: create + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission +--- +apiVersion: batch/v1 +kind: Job +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-patch + namespace: ingress-nginx +spec: + template: + metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission-patch + spec: + containers: + - args: + - patch + - --webhook-name=ingress-nginx-admission + - --namespace=$(POD_NAMESPACE) + - --patch-mutating=false + - --secret-name=ingress-nginx-admission + - --patch-failure-policy=Fail + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + image: registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20220916-gd32f8c343@sha256:39c5b2e3310dc4264d638ad28d9d1d96c4cbb2b2dcfb52368fe4e3c63f61e10f + imagePullPolicy: IfNotPresent + name: patch + securityContext: + allowPrivilegeEscalation: false + nodeSelector: + kubernetes.io/os: linux + restartPolicy: OnFailure + securityContext: + fsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + serviceAccountName: ingress-nginx-admission +--- +apiVersion: networking.k8s.io/v1 +kind: IngressClass +metadata: + labels: + app.kubernetes.io/component: controller + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: nginx +spec: + controller: k8s.io/ingress-nginx +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + labels: + app.kubernetes.io/component: admission-webhook + app.kubernetes.io/instance: ingress-nginx + app.kubernetes.io/name: ingress-nginx + app.kubernetes.io/part-of: ingress-nginx + app.kubernetes.io/version: 1.6.4 + name: ingress-nginx-admission +webhooks: + - admissionReviewVersions: + - v1 + clientConfig: + service: + name: ingress-nginx-controller-admission + namespace: ingress-nginx + path: /networking/v1/ingresses + failurePolicy: Fail + matchPolicy: Equivalent + name: validate.nginx.ingress.kubernetes.io + rules: + - apiGroups: + - networking.k8s.io + apiVersions: + - v1 + operations: + - CREATE + - UPDATE + resources: + - ingresses + sideEffects: None diff --git a/infra/templates/kustomization.yaml b/infra/templates/kustomization.yaml new file mode 100644 index 00000000..d9acc966 --- /dev/null +++ b/infra/templates/kustomization.yaml @@ -0,0 +1,36 @@ +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +resources: + - metrics-server-v0.6.2.yaml + - ingress-nginx-v1.6.4.yaml + 
+patchesStrategicMerge: + - ingress-nginx-patch.yaml + +secretGenerator: + # TLS certificates for core namespace + - name: default-tls + namespace: ingress-nginx + type: kubernetes.io/tls + files: + - tls.crt=wildcard.cer + - tls.key=wildcard.secret.key + options: + disableNameSuffixHash: true + - name: wildcard-tls + namespace: core + type: kubernetes.io/tls + files: + - tls.crt=wildcard.cer + - tls.key=wildcard.secret.key + options: + disableNameSuffixHash: true + - name: root-tls + namespace: core + type: kubernetes.io/tls + files: + - tls.crt=root.cer + - tls.key=root.secret.key + options: + disableNameSuffixHash: true diff --git a/infra/templates/metrics-server-v0.6.2.yaml b/infra/templates/metrics-server-v0.6.2.yaml new file mode 100644 index 00000000..5913c623 --- /dev/null +++ b/infra/templates/metrics-server-v0.6.2.yaml @@ -0,0 +1,197 @@ +# https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.2/components.yaml + +# TODO: Compare file changes with original version since there were fixes done +# - 1) removed resource requests + +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + rbac.authorization.k8s.io/aggregate-to-admin: "true" + rbac.authorization.k8s.io/aggregate-to-edit: "true" + rbac.authorization.k8s.io/aggregate-to-view: "true" + name: system:aggregated-metrics-reader +rules: + - apiGroups: + - metrics.k8s.io + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +rules: + - apiGroups: + - "" + resources: + - nodes/metrics + verbs: + - get + - apiGroups: + - "" + resources: + - pods + - nodes + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server-auth-reader + namespace: kube-system +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: extension-apiserver-authentication-reader +subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: metrics-server:system:auth-delegator +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:auth-delegator +subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + k8s-app: metrics-server + name: system:metrics-server +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:metrics-server +subjects: + - kind: ServiceAccount + name: metrics-server + namespace: kube-system +--- +apiVersion: v1 +kind: Service +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + ports: + - name: https + port: 443 + protocol: TCP + targetPort: https + selector: + k8s-app: metrics-server +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + labels: + k8s-app: metrics-server + name: metrics-server + namespace: kube-system +spec: + selector: + matchLabels: + k8s-app: metrics-server + strategy: + rollingUpdate: + maxUnavailable: 0 + template: + metadata: + labels: + k8s-app: metrics-server + spec: 
+ containers: + - args: + - --cert-dir=/tmp + - --secure-port=4443 + - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname + - --kubelet-use-node-status-port + - --metric-resolution=15s + image: registry.k8s.io/metrics-server/metrics-server:v0.6.2 + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /livez + port: https + scheme: HTTPS + periodSeconds: 10 + name: metrics-server + ports: + - containerPort: 4443 + name: https + protocol: TCP + readinessProbe: + failureThreshold: 3 + httpGet: + path: /readyz + port: https + scheme: HTTPS + initialDelaySeconds: 20 + periodSeconds: 10 + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + runAsNonRoot: true + runAsUser: 1000 + volumeMounts: + - mountPath: /tmp + name: tmp-dir + nodeSelector: + kubernetes.io/os: linux + priorityClassName: system-cluster-critical + serviceAccountName: metrics-server + volumes: + - emptyDir: {} + name: tmp-dir +--- +apiVersion: apiregistration.k8s.io/v1 +kind: APIService +metadata: + labels: + k8s-app: metrics-server + name: v1beta1.metrics.k8s.io +spec: + group: metrics.k8s.io + groupPriorityMinimum: 100 + insecureSkipTLSVerify: true + service: + name: metrics-server + namespace: kube-system + version: v1beta1 + versionPriority: 100 diff --git a/infra/variables.yml b/infra/variables.yml new file mode 100644 index 00000000..5a1a4f41 --- /dev/null +++ b/infra/variables.yml @@ -0,0 +1,25 @@ +# There is some potential interdependence of variables between different +# playbooks; this allows us to put all the variables in one place. We +# only run this on 'localhost' so there will only ever be one host here. +all: + hosts: + localhost + + vars: + # AKS Specific + ## REQUIRED + service_client_id: "{{ lookup('env', 'DC_AZ_SERVICE_CLIENT_ID') }}" + service_client_secret: "{{ lookup('env', 'DC_AZ_SERVICE_CLIENT_SECRET') }}" + + ## OPTIONAL + resource_group: "{{ lookup('env', 'DC_AZ_RESOURCE_GROUP', default='DatacovesTesting') }}" + location: "{{ lookup('env', 'DC_AZ_LOCATION', default='eastus') }}" + aks_name: "{{ lookup('env', 'DC_AZ_AKS_NAME', default='datacoves-test') }}" + kubernetes_version: "{{ lookup('env', 'DC_AZ_KUBERNETES_VERSION', default='1.31.5') }}" + database_password: "{{ lookup('env', 'DC_AZ_DB_PASSWORD', default='password') }}" + database_host: "{{ aks_name }}-db.postgres.database.azure.com" + + ## OPTIONAL + hostname: "{{ lookup('env', 'DC_HOSTNAME', default='datacoves-test.datacoves.com') }}" + kubectl_context: "{{ lookup('env', 'DC_KUBECTL_CONTEXT', default=aks_name) }}" + release: "{{ lookup('env', 'DC_RELEASE', 'latest') }}" diff --git a/package-lock.json b/package-lock.json index 41d2e110..abf67bcf 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,14 +1,8 @@ { - "name": "docs", - "lockfileVersion": 3, "requires": true, - "packages": { - "": { - "dependencies": { - "minisearch": "^6.3.0" - } - }, - "node_modules/minisearch": { + "lockfileVersion": 1, + "dependencies": { + "minisearch": { "version": "6.3.0", "resolved": "https://registry.npmjs.org/minisearch/-/minisearch-6.3.0.tgz", "integrity": "sha512-ihFnidEeU8iXzcVHy74dhkxh/dn8Dc08ERl0xwoMMGqp4+LvRSCgicb+zGqWthVokQKvCSxITlh3P08OzdTYCQ==" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..d16cb231 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,7 @@ +[tool.black] +line-length = 88 +# Excluding hidden files makes sense, and covers .git, .venv, etc... 
+exclude = '/\..*|__pycache__|migrations|node_modules|^/docker|^/config|^/local|^/.generated' +[tool.isort] +profile = "black" +extend_skip = ["__pycache__", "migrations", "node_modules"] diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 00000000..5af4e5cf --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,5 @@ +-r requirements.txt +black==23.10.1 +ipdb==0.13.9 +ipython>=7.16.2 +playwright==1.45.0 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 7b8496f3..7a006d8f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,14 @@ -markdown -beautifulsoup4 -pygments - +flake8==7.1.1 +isort==5.10.1 +kubernetes==29.0.0 +pre-commit==2.15.0 +pyfiglet==0.7 +PyGithub==2.3.0 +PyYAML==6.0.1 +questionary==1.10.0 +requests==2.27.1 +rich==10.16.2 +stripe==10.4.0 +beautifulsoup4==4.12.3 +Markdown==3.6 +Pygments==2.18.0 diff --git a/scripts/__init__.py b/scripts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/scripts/console.py b/scripts/console.py new file mode 100644 index 00000000..64550888 --- /dev/null +++ b/scripts/console.py @@ -0,0 +1,17 @@ +import pyfiglet +from rich import print +from rich.console import Console +from rich.panel import Padding, Panel + + +def print_logo(): + console = Console() + logo_str = str(pyfiglet.figlet_format("datacoves")) + console.print(logo_str, style="red") + console.print( + "Analytics Workbench for the Modern Data Stack\n", style="bold orange1" + ) + + +def print_title(title): + print(Padding(Panel(f"> {title}"), (1, 0), style="bold yellow")) diff --git a/scripts/data/ec2-instance-types.json b/scripts/data/ec2-instance-types.json new file mode 100644 index 00000000..91a4651d --- /dev/null +++ b/scripts/data/ec2-instance-types.json @@ -0,0 +1,46216 @@ +[ + { + "InstanceType": "c6i.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 5000, + "BaselineThroughputInMBps": 625.0, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + 
"SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6gd.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 950, + "Disks": [ + { + "SizeInGB": 950, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "im4gn.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3750, + "Disks": [ + { + "SizeInGB": 3750, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 5000, + "BaselineThroughputInMBps": 625.0, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + 
"Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "g5.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 600, + "Disks": [ + { + "SizeInGB": 600, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "A10G", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 24576 + } + } + ], + "TotalGpuMemoryInMiB": 24576 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r3.8xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 249856 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 640, + "Disks": [ + { + "SizeInGB": 320, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + 
"MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c4.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.9 + }, + "VCpuInfo": { + "DefaultVCpus": 36, + "DefaultCores": 18, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 61440 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4000, + "BaselineThroughputInMBps": 500.0, + "BaselineIops": 32000, + "MaximumBandwidthInMbps": 4000, + "MaximumThroughputInMBps": 500.0, + "MaximumIops": 32000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6g.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": 
"default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c4.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.9 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 30720 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2000, + "BaselineThroughputInMBps": 250.0, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 2000, + "MaximumThroughputInMBps": 250.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "t1.micro", + "CurrentGeneration": false, + "FreeTierEligible": true, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "i386", + "x86_64" + ] + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 627 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "unsupported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + 
"NetworkPerformance": "Very Low", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Very Low", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 2, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5ad.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 75, + "Disks": [ + { + "SizeInGB": 75, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5n.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 21504 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + "BaselineThroughputInMBps": 287.5, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 4750, + 
"MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6a.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2122, + "BaselineThroughputInMBps": 265.25, + "BaselineIops": 8333, + "MaximumBandwidthInMbps": 6666, + "MaximumThroughputInMBps": 833.333333, + "MaximumIops": 26667 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5dn.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 300, + "Disks": [ + { + "SizeInGB": 300, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": 
"supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + "BaselineThroughputInMBps": 287.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "a1.medium", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 2048 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 300, + "BaselineThroughputInMBps": 37.5, + "BaselineIops": 2500, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5ad.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 6, + 12, + 18, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + 
"EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6780, + "BaselineThroughputInMBps": 847.5, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6780, + "MaximumThroughputInMBps": 847.5, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5d.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1200, + "Disks": [ + { + "SizeInGB": 600, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6800, + "BaselineThroughputInMBps": 850.0, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6800, + "MaximumThroughputInMBps": 850.0, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i3en.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + 
"DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 60000, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 8, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5a.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1085, + "BaselineThroughputInMBps": 135.625, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x1e.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", 
+ "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 249856 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 240, + "Disks": [ + { + "SizeInGB": 240, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 7400, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 7400 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6gn.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 38000, + "BaselineThroughputInMBps": 4750.0, + "BaselineIops": 160000, + "MaximumBandwidthInMbps": 38000, + "MaximumThroughputInMBps": 4750.0, + "MaximumIops": 160000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": 
"required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r6gd.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 474, + "Disks": [ + { + "SizeInGB": 474, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2375, + "BaselineThroughputInMBps": 296.875, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "i3en.3xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 12, + "DefaultCores": 6, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 7500, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3800, + "BaselineThroughputInMBps": 475.0, + "BaselineIops": 15000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": 
"Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6gd.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 237, + "Disks": [ + { + "SizeInGB": 237, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1188, + "BaselineThroughputInMBps": 148.5, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r6i.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1250, + "BaselineThroughputInMBps": 156.25, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 10000, + 
"MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5dn.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1200, + "Disks": [ + { + "SizeInGB": 600, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6800, + "BaselineThroughputInMBps": 850.0, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6800, + "MaximumThroughputInMBps": 850.0, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5dn.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + 
"InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m2.4xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ] + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 70041 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1680, + "Disks": [ + { + "SizeInGB": 840, + "Count": 2, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "unsupported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "im4gn.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" 
+ ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1875, + "Disks": [ + { + "SizeInGB": 1875, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2500, + "BaselineThroughputInMBps": 312.5, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "f1.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 124928 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 470, + "Disks": [ + { + "SizeInGB": 470, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1700, + "BaselineThroughputInMBps": 212.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 1700, + "MaximumThroughputInMBps": 212.5, + "MaximumIops": 12000 + }, + "NvmeSupport": "supported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "FpgaInfo": { + "Fpgas": [ + { + "Name": "Virtex UltraScale (VU9P)", + "Manufacturer": "Xilinx", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 65536 + } + } + ], + "TotalFpgaMemoryInMiB": 65536 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + 
"BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r6g.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5n.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + 
"EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x1e.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 999424 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 960, + "Disks": [ + { + "SizeInGB": 960, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3500, + "BaselineThroughputInMBps": 437.5, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r6g.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + 
"BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r6g.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 315, + "BaselineThroughputInMBps": 39.375, + "BaselineIops": 2500, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5a.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": 
"required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5zn.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 800, + "BaselineThroughputInMBps": 100.0, + "BaselineIops": 3333, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5a.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2880, + "BaselineThroughputInMBps": 360.0, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": 
"required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6a.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 531, + "BaselineThroughputInMBps": 66.375, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 6666, + "MaximumThroughputInMBps": 833.333333, + "MaximumIops": 26667 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6g.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 630, + "BaselineThroughputInMBps": 78.75, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + 
"NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5zn.6xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.5 + }, + "VCpuInfo": { + "DefaultVCpus": 24, + "DefaultCores": 12, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t3a.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 695, + "BaselineThroughputInMBps": 86.875, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 2780, + "MaximumThroughputInMBps": 347.5, + "MaximumIops": 15700 + }, + "NvmeSupport": "required" + }, + 
"NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x2gd.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1900, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r4.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 499712 + }, + 
"InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14000, + "BaselineThroughputInMBps": 1750.0, + "BaselineIops": 75000, + "MaximumBandwidthInMbps": 14000, + "MaximumThroughputInMBps": 1750.0, + "MaximumIops": 75000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5b.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 30000, + "BaselineThroughputInMBps": 3750.0, + "BaselineIops": 130000, + "MaximumBandwidthInMbps": 30000, + "MaximumThroughputInMBps": 3750.0, + "MaximumIops": 130000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5ad.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 6, + 12, + 18, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + 
] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6780, + "BaselineThroughputInMBps": 847.5, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6780, + "MaximumThroughputInMBps": 847.5, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6800, + "BaselineThroughputInMBps": 850.0, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6800, + "MaximumThroughputInMBps": 850.0, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6gd.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 
+ }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 237, + "Disks": [ + { + "SizeInGB": 237, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1188, + "BaselineThroughputInMBps": 148.5, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c6i.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r3.large", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + 
], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 15360 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 32, + "Disks": [ + { + "SizeInGB": 32, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5d.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + 
"on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t4g.nano", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 512 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 43, + "BaselineThroughputInMBps": 5.375, + "BaselineIops": 250, + "MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 2, + "Ipv6AddressesPerInterface": 2, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5dn.large", + "CurrentGeneration": 
true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 75, + "Disks": [ + { + "SizeInGB": 75, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "d3en.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 223680, + "Disks": [ + { + "SizeInGB": 13980, + "Count": 16, + "Type": "hdd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 5000, + "BaselineThroughputInMBps": 625.0, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 5000, + "MaximumThroughputInMBps": 625.0, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 20, + "Ipv6AddressesPerInterface": 20, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + 
"SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "d3en.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 55920, + "Disks": [ + { + "SizeInGB": 13980, + "Count": 4, + "Type": "hdd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1700, + "BaselineThroughputInMBps": 212.5, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 2800, + "MaximumThroughputInMBps": 350.0, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 5, + "Ipv6AddressesPerInterface": 5, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6gd.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 118, + "Disks": [ + { + "SizeInGB": 118, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 630, + "BaselineThroughputInMBps": 78.75, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, 
+ "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5ad.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 600, + "Disks": [ + { + "SizeInGB": 300, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1580, + "BaselineThroughputInMBps": 197.5, + "BaselineIops": 6600, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13300 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "d2.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.4 + }, + "VCpuInfo": { + "DefaultVCpus": 36, + "DefaultCores": 18, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 249856 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 49152, + "Disks": [ + { + "SizeInGB": 2048, + "Count": 24, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4000, + 
"BaselineThroughputInMBps": 500.0, + "BaselineIops": 32000, + "MaximumBandwidthInMbps": 4000, + "MaximumThroughputInMBps": 500.0, + "MaximumIops": 32000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5a.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1580, + "BaselineThroughputInMBps": 197.5, + "BaselineIops": 8333, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5a.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1580, + 
"BaselineThroughputInMBps": 197.5, + "BaselineIops": 8333, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5n.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13600, + "BaselineThroughputInMBps": 1700.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13600, + "MaximumThroughputInMBps": 1700.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + 
"EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m4.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.4 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2000, + "BaselineThroughputInMBps": 250.0, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 2000, + "MaximumThroughputInMBps": 250.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "is4gen.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 24576 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + 
"TotalSizeInGB": 3750, + "Disks": [ + { + "SizeInGB": 3750, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2500, + "BaselineThroughputInMBps": 312.5, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5ad.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 8, + 12, + 16, + 20, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t3.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ 
+ "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 695, + "BaselineThroughputInMBps": 86.875, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 2780, + "MaximumThroughputInMBps": 347.5, + "MaximumIops": 15700 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i3en.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 30000, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6i.32xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + 
"SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 128, + "DefaultCores": 64, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48, + 50, + 52, + 54, + 56, + 58, + 60, + 62, + 64 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 40000, + "BaselineThroughputInMBps": 5000.0, + "BaselineIops": 160000, + "MaximumBandwidthInMbps": 40000, + "MaximumThroughputInMBps": 5000.0, + "MaximumIops": 160000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6gd.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 48, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2850, + "Disks": [ + { + "SizeInGB": 1425, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14250, + "BaselineThroughputInMBps": 1781.25, + "BaselineIops": 50000, + "MaximumBandwidthInMbps": 14250, + "MaximumThroughputInMBps": 1781.25, + "MaximumIops": 50000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 
Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m6i.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 30000, + "BaselineThroughputInMBps": 3750.0, + "BaselineIops": 120000, + "MaximumBandwidthInMbps": 30000, + "MaximumThroughputInMBps": 3750.0, + "MaximumIops": 120000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "37.5 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "37.5 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5d.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 150, + "Disks": [ + { + "SizeInGB": 150, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + 
"NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x2gd.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 1048576 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "t2.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": 
"Moderate", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "g3.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 499712 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14000, + "BaselineThroughputInMBps": 1750.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 14000, + "MaximumThroughputInMBps": 1750.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "M60", + "Manufacturer": "NVIDIA", + "Count": 4, + "MemoryInfo": { + "SizeInMiB": 8192 + } + } + ], + "TotalGpuMemoryInMiB": 32768 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "x1.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 999424 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1920, + "Disks": [ + { + "SizeInGB": 1920, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + 
"EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 7000, + "BaselineThroughputInMBps": 875.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 7000, + "MaximumThroughputInMBps": 875.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5ad.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 8, + 12, + 16, + 20, + 24, + 28, + 32, + 36, + 40, + 44, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, 
+ 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "im4gn.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 15000, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 20000, + "BaselineThroughputInMBps": 2500.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 20000, + "MaximumThroughputInMBps": 2500.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c6i.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + 
"SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 10000, + "BaselineThroughputInMBps": 1250.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12.5 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12.5 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6gd.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 59, + "Disks": [ + { + "SizeInGB": 59, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 315, + "BaselineThroughputInMBps": 39.375, + "BaselineIops": 2500, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + 
"InstanceType": "c6i.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2500, + "BaselineThroughputInMBps": 312.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c3.8xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.8 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 61440 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 640, + "Disks": [ + { + "SizeInGB": 320, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + 
"InstanceType": "r5ad.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 12, + 18, + 24, + 36, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13570, + "BaselineThroughputInMBps": 1696.25, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13570, + "MaximumThroughputInMBps": 1696.25, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "is4gen.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 15000, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 10000, + "BaselineThroughputInMBps": 1250.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + 
"EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "inf1.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1190, + "BaselineThroughputInMBps": 148.75, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "InferenceAcceleratorInfo": { + "Accelerators": [ + { + "Count": 1, + "Name": "Inferentia", + "Manufacturer": "AWS" + } + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6gn.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2375, + "BaselineThroughputInMBps": 296.88, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 
4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "g5.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 450, + "Disks": [ + { + "SizeInGB": 450, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 850, + "BaselineThroughputInMBps": 106.25, + "BaselineIops": 3500, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "A10G", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 24576 + } + } + ], + "TotalGpuMemoryInMiB": 24576 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "z1d.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.0 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 300, + "Disks": [ + { + "SizeInGB": 300, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3170, + "BaselineThroughputInMBps": 396.25, + "BaselineIops": 13333, + 
"MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5n.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 5376 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5d.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 50, + "Disks": [ + { + "SizeInGB": 50, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": 
"supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6gd.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "x1e.32xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 128, + "DefaultCores": 64, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 8, + 12, + 16, + 20, + 24, + 28, + 32, + 36, + 40, + 44, + 48, + 52, + 56, + 60, + 64 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 
3997696 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3840, + "Disks": [ + { + "SizeInGB": 1920, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14000, + "BaselineThroughputInMBps": 1750.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 14000, + "MaximumThroughputInMBps": 1750.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "i2.2xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 62464 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1600, + "Disks": [ + { + "SizeInGB": 800, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5ad.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": 
"nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 600, + "Disks": [ + { + "SizeInGB": 300, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2880, + "BaselineThroughputInMBps": 360.0, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5zn.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + 
"SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3170, + "BaselineThroughputInMBps": 396.25, + "BaselineIops": 13333, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c3.large", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "i386", + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.8 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 3840 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 32, + "Disks": [ + { + "SizeInGB": 16, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m6i.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + 
"spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6gd.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 59, + "Disks": [ + { + "SizeInGB": 59, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 315, + "BaselineThroughputInMBps": 39.375, + "BaselineIops": 2500, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5a.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": 
false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g5g.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2375, + "BaselineThroughputInMBps": 296.875, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4g", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, 
+ "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "g5g.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4g", + "Manufacturer": "NVIDIA", + "Count": 2, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 32768 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m2.2xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ] + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 35020 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 850, + "Disks": [ + { + "SizeInGB": 850, + "Count": 1, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "unsupported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 500, + "BaselineThroughputInMBps": 62.5, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 500, + "MaximumThroughputInMBps": 62.5, + "MaximumIops": 
4000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5zn.3xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.5 + }, + "VCpuInfo": { + "DefaultVCpus": 12, + "DefaultCores": 6, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 49152 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t4g.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 695, + "BaselineThroughputInMBps": 86.875, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 2780, + "MaximumThroughputInMBps": 347.5, + "MaximumIops": 15700 + 
}, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "p3.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.7 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 62464 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1750, + "BaselineThroughputInMBps": 218.75, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 1750, + "MaximumThroughputInMBps": 218.75, + "MaximumIops": 10000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "V100", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5d.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2400, + "Disks": [ + { + 
"SizeInGB": 600, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13600, + "BaselineThroughputInMBps": 1700.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13600, + "MaximumThroughputInMBps": 1700.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 18750, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c1.xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ] + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 7168 + }, + 
"InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1680, + "Disks": [ + { + "SizeInGB": 420, + "Count": 4, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "unsupported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c4.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.9 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 
+ ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 3840 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 500, + "BaselineThroughputInMBps": 62.5, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 500, + "MaximumThroughputInMBps": 62.5, + "MaximumIops": 4000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "mac1.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64_mac" + ], + "SustainedClockSpeedInGhz": 3.2 + }, + "VCpuInfo": { + "DefaultVCpus": 12, + "DefaultCores": 6, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14000, + "BaselineThroughputInMBps": 1750.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 14000, + "MaximumThroughputInMBps": 1750.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m4.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.4 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": 
"default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 450, + "BaselineThroughputInMBps": 56.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 450, + "MaximumThroughputInMBps": 56.25, + "MaximumIops": 3600 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r3.4xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 124928 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 320, + "Disks": [ + { + "SizeInGB": 320, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2000, + "BaselineThroughputInMBps": 250.0, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 2000, + "MaximumThroughputInMBps": 250.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m6i.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 
12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 10000, + "BaselineThroughputInMBps": 1250.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12.5 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12.5 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6g.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5n.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 
1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13600, + "BaselineThroughputInMBps": 1700.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13600, + "MaximumThroughputInMBps": 1700.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5n.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 43008 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6gn.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 48, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 
9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 28500, + "BaselineThroughputInMBps": 3562.5, + "BaselineIops": 120000, + "MaximumBandwidthInMbps": 28500, + "MaximumThroughputInMBps": 3562.5, + "MaximumIops": 120000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5n.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": 
false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x2gd.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 950, + "Disks": [ + { + "SizeInGB": 950, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5a.4xlarge", + 
"CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2880, + "BaselineThroughputInMBps": 360.0, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6gn.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + 
"SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "h1.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 8000, + "Disks": [ + { + "SizeInGB": 2000, + "Count": 4, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 7000, + "BaselineThroughputInMBps": 875.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 7000, + "MaximumThroughputInMBps": 875.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "x2gd.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 1048576 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + 
"NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "g2.2xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.6 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 15360 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 60, + "Disks": [ + { + "SizeInGB": 60, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "K520", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 4096 + } + } + ], + "TotalGpuMemoryInMiB": 4096 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5n.9xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 36, + "DefaultCores": 18, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + 
"InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "p2.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.7 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 499712 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 5000, + "BaselineThroughputInMBps": 625.0, + "BaselineIops": 32500, + "MaximumBandwidthInMbps": 5000, + "MaximumThroughputInMBps": 625.0, + "MaximumIops": 32500 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "K80", + "Manufacturer": "NVIDIA", + "Count": 8, + "MemoryInfo": { + "SizeInMiB": 12288 + } + } + ], + "TotalGpuMemoryInMiB": 98304 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5d.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + 
"x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 600, + "Disks": [ + { + "SizeInGB": 300, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 18750, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6gd.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c1.medium", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + 
"spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "i386", + "x86_64" + ] + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 1740 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 350, + "Disks": [ + { + "SizeInGB": 350, + "Count": 1, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "unsupported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 6, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m6g.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1188, + "BaselineThroughputInMBps": 148.5, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5dn.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + 
"ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 150, + "Disks": [ + { + "SizeInGB": 150, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6g.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 630, + "BaselineThroughputInMBps": 78.75, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5d.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + 
"SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5ad.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 8, + 12, + 16, + 20, + 24, + 28, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2400, + "Disks": [ + { + "SizeInGB": 1200, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6300, + "BaselineThroughputInMBps": 787.5, + "BaselineIops": 26700, + "MaximumBandwidthInMbps": 6300, + "MaximumThroughputInMBps": 787.5, + "MaximumIops": 26700 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + 
"EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g4ad.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.0 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 300, + "Disks": [ + { + "SizeInGB": 300, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 800, + "BaselineThroughputInMBps": 100.0, + "BaselineIops": 3400, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "Radeon Pro V520", + "Manufacturer": "AMD", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 8192 + } + } + ], + "TotalGpuMemoryInMiB": 8192 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6gd.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + 
"SizeInGB": 1900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5a.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 8, + 12, + 16, + 20, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5ad.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 
2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 300, + "Disks": [ + { + "SizeInGB": 300, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1580, + "BaselineThroughputInMBps": 197.5, + "BaselineIops": 8333, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5a.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 6, + 12, + 18, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6780, + "BaselineThroughputInMBps": 847.5, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6780, + "MaximumThroughputInMBps": 847.5, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "d3.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, 
+ "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 5940, + "Disks": [ + { + "SizeInGB": 1980, + "Count": 3, + "Type": "hdd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 850, + "BaselineThroughputInMBps": 106.25, + "BaselineIops": 5000, + "MaximumBandwidthInMbps": 2800, + "MaximumThroughputInMBps": 350.0, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 15 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 15 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 3, + "Ipv6AddressesPerInterface": 3, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6gd.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r6i.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + 
"SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 5000, + "BaselineThroughputInMBps": 625.0, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "a1.xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 800, + "BaselineThroughputInMBps": 100.0, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5ad.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": 
"nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 150, + "Disks": [ + { + "SizeInGB": 150, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 400, + "BaselineThroughputInMBps": 50.0, + "BaselineIops": 1600, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13300 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6i.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 30000, + "BaselineThroughputInMBps": 3750.0, + "BaselineIops": 120000, + "MaximumBandwidthInMbps": 30000, + "MaximumThroughputInMBps": 3750.0, + "MaximumIops": 120000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "37.5 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "37.5 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { 
+ "InstanceType": "m5.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m6gd.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + 
"EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "d2.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.4 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 62464 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 12288, + "Disks": [ + { + "SizeInGB": 2048, + "Count": 6, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r6g.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 48, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14250, + "BaselineThroughputInMBps": 1781.25, + "BaselineIops": 50000, + "MaximumBandwidthInMbps": 14250, + "MaximumThroughputInMBps": 1781.25, + "MaximumIops": 50000 + }, + 
"NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m4.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.4 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 750, + "BaselineThroughputInMBps": 93.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 750, + "MaximumThroughputInMBps": 93.75, + "MaximumIops": 6000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + 
"DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "g3.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.7 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 124928 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3500, + "BaselineThroughputInMBps": 437.5, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "M60", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 8192 + } + } + ], + "TotalGpuMemoryInMiB": 8192 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6g.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 48, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + 
"EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14250, + "BaselineThroughputInMBps": 1781.25, + "BaselineIops": 50000, + "MaximumBandwidthInMbps": 14250, + "MaximumThroughputInMBps": 1781.25, + "MaximumIops": 50000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c6i.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 20000, + "BaselineThroughputInMBps": 2500.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 20000, + "MaximumThroughputInMBps": 2500.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5d.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { 
+ "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "d3en.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 111840, + "Disks": [ + { + "SizeInGB": 13980, + "Count": 8, + "Type": "hdd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2800, + "BaselineThroughputInMBps": 350.0, + "BaselineIops": 15000, + "MaximumBandwidthInMbps": 2800, + "MaximumThroughputInMBps": 350.0, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6g.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { 
+ "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5d.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 75, + "Disks": [ + { + "SizeInGB": 75, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6g.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + 
"SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1188, + "BaselineThroughputInMBps": 148.5, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5n.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6800, + "BaselineThroughputInMBps": 850.0, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6800, + "MaximumThroughputInMBps": 850.0, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "a1.large", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + 
"SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 525, + "BaselineThroughputInMBps": 65.625, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "g4ad.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.0 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 600, + "Disks": [ + { + "SizeInGB": 600, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1580, + "BaselineThroughputInMBps": 197.5, + "BaselineIops": 6700, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "Radeon Pro V520", + "Manufacturer": "AMD", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 8192 + } + } + ], + "TotalGpuMemoryInMiB": 8192 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + 
"SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t2.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Low to Moderate", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Low to Moderate", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 12, + "Ipv6AddressesPerInterface": 12, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5a.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1580, + "BaselineThroughputInMBps": 197.5, + "BaselineIops": 6600, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13300 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t2.micro", + "CurrentGeneration": true, + "FreeTierEligible": true, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + 
"BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "i386", + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 1024 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Low to Moderate", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Low to Moderate", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 2, + "Ipv6AddressesPerInterface": 2, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "t3a.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 350, + "BaselineThroughputInMBps": 43.75, + "BaselineIops": 2000, + "MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 6, + "Ipv6AddressesPerInterface": 6, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5dn.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + 
"SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 150, + "Disks": [ + { + "SizeInGB": 150, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r3.2xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 62464 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 160, + "Disks": [ + { + "SizeInGB": 160, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "t3.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + 
"Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 695, + "BaselineThroughputInMBps": 86.875, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 2780, + "MaximumThroughputInMBps": 347.5, + "MaximumIops": 15700 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 12, + "Ipv6AddressesPerInterface": 12, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g4dn.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4", + "Manufacturer": "NVIDIA", + "Count": 8, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 131072 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": 
"r5ad.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1200, + "Disks": [ + { + "SizeInGB": 600, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5ad.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1200, + "Disks": [ + { + "SizeInGB": 600, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + 
"EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t3.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 695, + "BaselineThroughputInMBps": 86.875, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 2780, + "MaximumThroughputInMBps": 347.5, + "MaximumIops": 15700 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "d3.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 47520, + "Disks": [ + { + "SizeInGB": 1980, + "Count": 24, + "Type": "hdd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 5000, + "BaselineThroughputInMBps": 625.0, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 5000, + "MaximumThroughputInMBps": 625.0, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + 
"MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 20, + "Ipv6AddressesPerInterface": 20, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g3.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.7 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 249856 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 7000, + "BaselineThroughputInMBps": 875.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 7000, + "MaximumThroughputInMBps": 875.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "M60", + "Manufacturer": "NVIDIA", + "Count": 2, + "MemoryInfo": { + "SizeInMiB": 8192 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "g4dn.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 900, + "Disks": [ + { + "SizeInGB": 900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 
9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4", + "Manufacturer": "NVIDIA", + "Count": 4, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 65536 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5d.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 200, + "Disks": [ + { + "SizeInGB": 200, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + "BaselineThroughputInMBps": 287.5, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g5.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + 
"DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 3800, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "A10G", + "Manufacturer": "NVIDIA", + "Count": 4, + "MemoryInfo": { + "SizeInMiB": 24576 + } + } + ], + "TotalGpuMemoryInMiB": 98304 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g4ad.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.0 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 8, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1200, + "Disks": [ + { + "SizeInGB": 1200, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3170, + "BaselineThroughputInMBps": 396.25, + "BaselineIops": 13333, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "15 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "15 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "Radeon Pro V520", + "Manufacturer": "AMD", + "Count": 2, + "MemoryInfo": { + "SizeInMiB": 8192 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + 
"HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5a.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5dn.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1200, + "Disks": [ + { + "SizeInGB": 600, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6800, + "BaselineThroughputInMBps": 850.0, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6800, + "MaximumThroughputInMBps": 850.0, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + 
"Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x2gd.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 59, + "Disks": [ + { + "SizeInGB": 59, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 315, + "BaselineThroughputInMBps": 39.375, + "BaselineIops": 2500, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m6i.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 20000, + "BaselineThroughputInMBps": 2500.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 20000, + "MaximumThroughputInMBps": 2500.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + 
"NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6i.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 30000, + "BaselineThroughputInMBps": 3750.0, + "BaselineIops": 120000, + "MaximumBandwidthInMbps": 30000, + "MaximumThroughputInMBps": 3750.0, + "MaximumIops": 120000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "37.5 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "37.5 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t4g.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 347, + "BaselineThroughputInMBps": 43.375, + "BaselineIops": 2000, + "MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + 
"MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 6, + "Ipv6AddressesPerInterface": 6, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "z1d.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.0 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 150, + "Disks": [ + { + "SizeInGB": 150, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1580, + "BaselineThroughputInMBps": 197.5, + "BaselineIops": 6667, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5a.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 400, + "BaselineThroughputInMBps": 50.0, + "BaselineIops": 1600, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + 
"MaximumIops": 13300 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5n.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 15000, + 
"MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "u-12tb1.112xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.1 + }, + "VCpuInfo": { + "DefaultVCpus": 448, + "DefaultCores": 224, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 8, + 16, + 24, + 32, + 40, + 48, + 56, + 64, + 72, + 80, + 88, + 96, + 104, + 112, + 120, + 128, + 136, + 144, + 152, + 160, + 168, + 176, + 184, + 192, + 200, + 208, + 216, + 224 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 12582912 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "vt1.3xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 12, + "DefaultCores": 6, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 6 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 24576 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", 
+ "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2375, + "BaselineThroughputInMBps": 296.875, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "3.12 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "3.12 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5b.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 60000, + "BaselineThroughputInMBps": 7500.0, + "BaselineIops": 260000, + "MaximumBandwidthInMbps": 60000, + "MaximumThroughputInMBps": 7500.0, + "MaximumIops": 260000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 
6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5b.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1250, + "BaselineThroughputInMBps": 156.25, + "BaselineIops": 5417, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 43333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t3a.small", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 2048 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 175, + "BaselineThroughputInMBps": 21.875, + "BaselineIops": 1000, + 
"MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5a.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 800, + "BaselineThroughputInMBps": 100.0, + "BaselineIops": 3200, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13300 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t3.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 347, + "BaselineThroughputInMBps": 43.375, + "BaselineIops": 2000, + 
"MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 6, + "Ipv6AddressesPerInterface": 6, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6gn.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 2048 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 760, + "BaselineThroughputInMBps": 95.0, + "BaselineIops": 2500, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 16 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 16 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "h1.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2000, + "Disks": [ + { + "SizeInGB": 2000, + "Count": 1, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + 
"EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1750, + "BaselineThroughputInMBps": 218.75, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 1750, + "MaximumThroughputInMBps": 218.75, + "MaximumIops": 12000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6gd.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 237, + "Disks": [ + { + "SizeInGB": 237, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1188, + "BaselineThroughputInMBps": 148.5, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m3.xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + 
}, + "MemoryInfo": { + "SizeInMiB": 15360 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 80, + "Disks": [ + { + "SizeInGB": 40, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 500, + "BaselineThroughputInMBps": 62.5, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 500, + "MaximumThroughputInMBps": 62.5, + "MaximumIops": 4000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5b.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 10000, + "BaselineThroughputInMBps": 1250.0, + "BaselineIops": 43333, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 43333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c3.4xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.8 + }, + 
"VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 30720 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 320, + "Disks": [ + { + "SizeInGB": 160, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2000, + "BaselineThroughputInMBps": 250.0, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 2000, + "MaximumThroughputInMBps": 250.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "t4g.small", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 2048 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 174, + "BaselineThroughputInMBps": 21.75, + "BaselineIops": 1000, + "MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5b.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": 
"nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 60000, + "BaselineThroughputInMBps": 7500.0, + "BaselineIops": 260000, + "MaximumBandwidthInMbps": 60000, + "MaximumThroughputInMBps": 7500.0, + "MaximumIops": 260000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "is4gen.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 49152 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 7500, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 5000, + "BaselineThroughputInMBps": 625.0, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" 
+ ] + }, + { + "InstanceType": "r5d.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2400, + "Disks": [ + { + "SizeInGB": 600, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13600, + "BaselineThroughputInMBps": 1700.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13600, + "MaximumThroughputInMBps": 1700.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x2gd.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 237, + "Disks": [ + { + "SizeInGB": 237, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1188, + "BaselineThroughputInMBps": 148.5, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, 
+ "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5ad.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 300, + "Disks": [ + { + "SizeInGB": 300, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 800, + "BaselineThroughputInMBps": 100.0, + "BaselineIops": 3200, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13300 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g5g.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + 
"MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4g", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5dn.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + 
"BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5n.18xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 72, + "DefaultCores": 36, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g4dn.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + 
"TotalSizeInGB": 125, + "Disks": [ + { + "SizeInGB": 125, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 950, + "BaselineThroughputInMBps": 118.75, + "BaselineIops": 3000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i3.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 15616 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 475, + "Disks": [ + { + "SizeInGB": 475, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 425, + "BaselineThroughputInMBps": 53.125, + "BaselineIops": 3000, + "MaximumBandwidthInMbps": 425, + "MaximumThroughputInMBps": 53.125, + "MaximumIops": 3000 + }, + "NvmeSupport": "supported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5d.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + 
"SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 300, + "Disks": [ + { + "SizeInGB": 300, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + "BaselineThroughputInMBps": 287.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5d.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 75, + "Disks": [ + { + "SizeInGB": 75, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": 
true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c3.xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.8 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 7680 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 80, + "Disks": [ + { + "SizeInGB": 40, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 500, + "BaselineThroughputInMBps": 62.5, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 500, + "MaximumThroughputInMBps": 62.5, + "MaximumIops": 4000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5d.18xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 72, + "DefaultCores": 36, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 147456 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + 
} + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5zn.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "i3.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 31232 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 950, + "Disks": [ + { + "SizeInGB": 950, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 850, + "BaselineThroughputInMBps": 106.25, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 850, + "MaximumThroughputInMBps": 106.25, + "MaximumIops": 6000 + }, + "NvmeSupport": "supported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 
0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "t3.nano", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 512 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 43, + "BaselineThroughputInMBps": 5.375, + "BaselineIops": 250, + "MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 2, + "Ipv6AddressesPerInterface": 2, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i2.4xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 124928 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3200, + "Disks": [ + { + "SizeInGB": 800, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2000, + "BaselineThroughputInMBps": 250.0, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 2000, + "MaximumThroughputInMBps": 250.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + 
"NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m6g.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r6gd.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 950, + "Disks": [ + { + "SizeInGB": 950, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + 
"EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5a.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5b.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": 
"supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2500, + "BaselineThroughputInMBps": 312.5, + "BaselineIops": 10833, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 43333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "inf1.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1190, + "BaselineThroughputInMBps": 148.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "InferenceAcceleratorInfo": { + "Accelerators": [ + { + "Count": 1, + "Name": "Inferentia", + "Manufacturer": "AWS" + } + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c4.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.9 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + 
"SizeInMiB": 15360 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5ad.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 150, + "Disks": [ + { + "SizeInGB": 150, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1085, + "BaselineThroughputInMBps": 135.625, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6a.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + 
"DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13300, + "BaselineThroughputInMBps": 1662.5, + "BaselineIops": 53333, + "MaximumBandwidthInMbps": 13300, + "MaximumThroughputInMBps": 1662.5, + "MaximumIops": 53333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6gd.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1900, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5b.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + 
"on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 5000, + "BaselineThroughputInMBps": 625.0, + "BaselineIops": 21667, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 43333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r4.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 62464 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1700, + "BaselineThroughputInMBps": 212.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 1700, + "MaximumThroughputInMBps": 212.5, + "MaximumIops": 12000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "u-6tb1.112xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + 
"on-demand" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.1 + }, + "VCpuInfo": { + "DefaultVCpus": 448, + "DefaultCores": 224, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 8, + 16, + 24, + 32, + 40, + 48, + 56, + 64, + 72, + 80, + 88, + 96, + 104, + 112, + 120, + 128, + 136, + 144, + 152, + 160, + 168, + 176, + 184, + 192, + 200, + 208, + 216, + 224 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 6291456 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6g.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 48, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14250, + "BaselineThroughputInMBps": 1781.25, + "BaselineIops": 50000, + "MaximumBandwidthInMbps": 14250, + "MaximumThroughputInMBps": 1781.25, + "MaximumIops": 50000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + 
"PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13600, + "BaselineThroughputInMBps": 1700.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13600, + "MaximumThroughputInMBps": 1700.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r3.xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 31232 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 80, + "Disks": [ + { + "SizeInGB": 80, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 500, + "BaselineThroughputInMBps": 62.5, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 500, + "MaximumThroughputInMBps": 62.5, + "MaximumIops": 4000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + 
"MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m6g.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 630, + "BaselineThroughputInMBps": 78.75, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5a.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 8, + 12, + 16, + 20, + 24, + 28, + 32, + 36, + 40, + 44, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + 
"NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "d3en.6xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 24, + "DefaultCores": 12, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 167760, + "Disks": [ + { + "SizeInGB": 13980, + "Count": 12, + "Type": "hdd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4000, + "BaselineThroughputInMBps": 500.0, + "BaselineIops": 25000, + "MaximumBandwidthInMbps": 4000, + "MaximumThroughputInMBps": 500.0, + "MaximumIops": 25000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "40 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "40 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + 
"DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5ad.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 12, + 18, + 24, + 36, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13750, + "BaselineThroughputInMBps": 1718.75, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13750, + "MaximumThroughputInMBps": 1718.75, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6i.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 20000, + "BaselineThroughputInMBps": 2500.0, + "BaselineIops": 80000, + 
"MaximumBandwidthInMbps": 20000, + "MaximumThroughputInMBps": 2500.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6i.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 128, + "DefaultCores": 64, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 40000, + "BaselineThroughputInMBps": 5000.0, + "BaselineIops": 160000, + "MaximumBandwidthInMbps": 40000, + "MaximumThroughputInMBps": 5000.0, + "MaximumIops": 160000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "u-6tb1.56xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.1 + }, + "VCpuInfo": { + "DefaultVCpus": 224, + "DefaultCores": 224, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 8, + 16, + 24, + 32, + 40, + 48, + 56, + 64, + 72, + 80, + 88, + 96, + 104, + 112, + 120, + 128, + 136, + 144, + 152, + 160, + 168, + 176, + 184, + 192, + 200, + 208, + 216, + 224 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 6291456 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + 
"BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "inf1.6xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 24, + "DefaultCores": 12, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 49152 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "InferenceAcceleratorInfo": { + "Accelerators": [ + { + "Count": 4, + "Name": "Inferentia", + "Manufacturer": "AWS" + } + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + 
"InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 18750, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6i.32xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 128, + "DefaultCores": 64, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48, + 50, + 52, + 54, + 56, + 58, + 60, + 62, + 64 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 40000, + "BaselineThroughputInMBps": 5000.0, + "BaselineIops": 160000, + "MaximumBandwidthInMbps": 40000, + "MaximumThroughputInMBps": 5000.0, + "MaximumIops": 160000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "h1.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 
+ }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 16000, + "Disks": [ + { + "SizeInGB": 2000, + "Count": 8, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14000, + "BaselineThroughputInMBps": 1750.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 14000, + "MaximumThroughputInMBps": 1750.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5n.large", + 
"CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x2gd.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 475, + "Disks": [ + { + "SizeInGB": 475, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2375, + "BaselineThroughputInMBps": 296.875, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": 
false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5ad.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2400, + "Disks": [ + { + "SizeInGB": 600, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6g.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + 
"Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5d.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 400, + "Disks": [ + { + "SizeInGB": 400, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g5g.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + 
"MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4g", + "Manufacturer": "NVIDIA", + "Count": 2, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 32768 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "t4g.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 695, + "BaselineThroughputInMBps": 86.875, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 2780, + "MaximumThroughputInMBps": 347.5, + "MaximumIops": 15700 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 12, + "Ipv6AddressesPerInterface": 12, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5d.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + 
"BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t4g.micro", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 1024 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 87, + "BaselineThroughputInMBps": 10.875, + "BaselineIops": 500, + "MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 2, + "Ipv6AddressesPerInterface": 2, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5n.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + "BaselineThroughputInMBps": 287.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + 
"MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5dn.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 75, + "Disks": [ + { + "SizeInGB": 75, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5d.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 150, + "Disks": [ + { + "SizeInGB": 150, + "Count": 1, + "Type": 
"ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6a.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4245, + "BaselineThroughputInMBps": 530.625, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 6666, + "MaximumThroughputInMBps": 833.333333, + "MaximumIops": 26667 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x2gd.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 48, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 
11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2850, + "Disks": [ + { + "SizeInGB": 1425, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14250, + "BaselineThroughputInMBps": 1781.25, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 14250, + "MaximumThroughputInMBps": 1781.25, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r4.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 31232 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 850, + "BaselineThroughputInMBps": 106.25, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 850, + "MaximumThroughputInMBps": 106.25, + "MaximumIops": 6000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5ad.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + 
"SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 75, + "Disks": [ + { + "SizeInGB": 75, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5d.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1200, + "Disks": [ + { + "SizeInGB": 600, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6800, + "BaselineThroughputInMBps": 850.0, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6800, + "MaximumThroughputInMBps": 850.0, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, 
+ "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5a.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 8, + 12, + 16, + 20, + 24, + 28, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6300, + "BaselineThroughputInMBps": 787.5, + "BaselineIops": 26700, + "MaximumBandwidthInMbps": 6300, + "MaximumThroughputInMBps": 787.5, + "MaximumIops": 26700 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t4g.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 695, + "BaselineThroughputInMBps": 86.875, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 2780, + "MaximumThroughputInMBps": 347.5, + "MaximumIops": 15700 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + 
"HibernationSupported": false, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6800, + "BaselineThroughputInMBps": 850.0, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6800, + "MaximumThroughputInMBps": 850.0, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t3.micro", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 1024 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 87, + "BaselineThroughputInMBps": 10.875, + "BaselineIops": 500, + "MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 2, + "Ipv6AddressesPerInterface": 2, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": 
true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g5.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 3800, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 16000, + "BaselineThroughputInMBps": 2000.0, + "BaselineIops": 65000, + "MaximumBandwidthInMbps": 16000, + "MaximumThroughputInMBps": 2000.0, + "MaximumIops": 65000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "40 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "40 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "A10G", + "Manufacturer": "NVIDIA", + "Count": 4, + "MemoryInfo": { + "SizeInMiB": 24576 + } + } + ], + "TotalGpuMemoryInMiB": 98304 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i2.xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 31232 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 800, + "Disks": [ + { + "SizeInGB": 800, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 500, + "BaselineThroughputInMBps": 62.5, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 500, + "MaximumThroughputInMBps": 62.5, + "MaximumIops": 4000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + 
"NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5.9xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 36, + "DefaultCores": 18, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 73728 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "a1.metal", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3500, + "BaselineThroughputInMBps": 437.5, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + 
"MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m1.xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ] + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 15360 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1680, + "Disks": [ + { + "SizeInGB": 420, + "Count": 4, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "unsupported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5zn.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + 
"NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "p4d.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.0 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 1179648 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 8000, + "Disks": [ + { + "SizeInGB": 1000, + "Count": 8, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "4x 100 Gigabit", + "MaximumNetworkInterfaces": 60, + "MaximumNetworkCards": 4, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + }, + { + "NetworkCardIndex": 1, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + }, + { + "NetworkCardIndex": 2, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + }, + { + "NetworkCardIndex": 3, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 4 + }, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "A100", + "Manufacturer": "NVIDIA", + "Count": 8, + "MemoryInfo": { + "SizeInMiB": 40960 + } + } + ], + "TotalGpuMemoryInMiB": 327680 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m4.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + 
"Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 10000, + "BaselineThroughputInMBps": 1250.0, + "BaselineIops": 65000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 65000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "z1d.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.0 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + 
}, + { + "InstanceType": "m5n.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m6i.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 5000, + "BaselineThroughputInMBps": 625.0, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x2gd.large", + 
"CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 118, + "Disks": [ + { + "SizeInGB": 118, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 630, + "BaselineThroughputInMBps": 78.75, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5n.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + 
"SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5n.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 15000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "vt1.6xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 24, + "DefaultCores": 12, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 6, + 12 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 49152 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "6.25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "6.25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + 
"SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c4.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.9 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 7680 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 750, + "BaselineThroughputInMBps": 93.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 750, + "MaximumThroughputInMBps": 93.75, + "MaximumIops": 6000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "t3a.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 695, + "BaselineThroughputInMBps": 86.875, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 2780, + "MaximumThroughputInMBps": 347.5, + "MaximumIops": 15700 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 12, + "Ipv6AddressesPerInterface": 12, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + 
"HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5d.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g5.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1900, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 16000, + "BaselineThroughputInMBps": 2000.0, + "BaselineIops": 65000, + "MaximumBandwidthInMbps": 16000, + "MaximumThroughputInMBps": 2000.0, + "MaximumIops": 65000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + 
"NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "A10G", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 24576 + } + } + ], + "TotalGpuMemoryInMiB": 24576 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "a1.2xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1750, + "BaselineThroughputInMBps": 218.75, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "d3.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 11880, + "Disks": [ + { + "SizeInGB": 1980, + "Count": 6, + "Type": "hdd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1700, + "BaselineThroughputInMBps": 212.5, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 2800, + "MaximumThroughputInMBps": 350.0, + "MaximumIops": 15000 + }, + 
"NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 15 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 15 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 5, + "Ipv6AddressesPerInterface": 5, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "h1.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 4000, + "Disks": [ + { + "SizeInGB": 2000, + "Count": 2, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3500, + "BaselineThroughputInMBps": 437.5, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5dn.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + 
"EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "z1d.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.0 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5ad.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + 
"InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 300, + "Disks": [ + { + "SizeInGB": 300, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1580, + "BaselineThroughputInMBps": 197.5, + "BaselineIops": 8333, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g4ad.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.0 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 8, + 16, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2400, + "Disks": [ + { + "SizeInGB": 1200, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6300, + "BaselineThroughputInMBps": 787.5, + "BaselineIops": 26667, + "MaximumBandwidthInMbps": 6300, + "MaximumThroughputInMBps": 787.5, + "MaximumIops": 26667 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "Radeon Pro V520", + "Manufacturer": "AMD", + "Count": 4, + "MemoryInfo": { + "SizeInMiB": 8192 + } + } + ], + "TotalGpuMemoryInMiB": 32768 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + 
"SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + "BaselineThroughputInMBps": 287.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m4.10xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.4 + }, + "VCpuInfo": { + "DefaultVCpus": 40, + "DefaultCores": 20, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 163840 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4000, + "BaselineThroughputInMBps": 500.0, + "BaselineIops": 32000, + "MaximumBandwidthInMbps": 4000, + "MaximumThroughputInMBps": 500.0, + "MaximumIops": 32000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m6a.xlarge", + "CurrentGeneration": true, + 
"FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1061, + "BaselineThroughputInMBps": 132.625, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 6666, + "MaximumThroughputInMBps": 833.333333, + "MaximumIops": 26667 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5d.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + 
"BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6g.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "i3.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 499712 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 15200, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 8, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14000, + "BaselineThroughputInMBps": 1750.0, + "BaselineIops": 65000, + "MaximumBandwidthInMbps": 14000, + "MaximumThroughputInMBps": 1750.0, + "MaximumIops": 65000 + }, + "NvmeSupport": "supported" + }, + "NetworkInfo": { + 
"NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "z1d.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.0 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 75, + "Disks": [ + { + "SizeInGB": 75, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 800, + "BaselineThroughputInMBps": 100.0, + "BaselineIops": 3333, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g3s.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.7 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 31232 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 850, + "BaselineThroughputInMBps": 106.25, + "BaselineIops": 5000, + "MaximumBandwidthInMbps": 850, 
+ "MaximumThroughputInMBps": 106.25, + "MaximumIops": 5000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "M60", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 8192 + } + } + ], + "TotalGpuMemoryInMiB": 8192 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5b.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 20000, + "BaselineThroughputInMBps": 2500.0, + "BaselineIops": 86667, + "MaximumBandwidthInMbps": 20000, + "MaximumThroughputInMBps": 2500.0, + "MaximumIops": 86667 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r4.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 15616 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + 
"EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 425, + "BaselineThroughputInMBps": 53.125, + "BaselineIops": 3000, + "MaximumBandwidthInMbps": 425, + "MaximumThroughputInMBps": 53.125, + "MaximumIops": 3000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5a.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 12, + 18, + 24, + 36, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13570, + "BaselineThroughputInMBps": 1696.25, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13570, + "MaximumThroughputInMBps": 1696.25, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "p2.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.7 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 62464 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + 
"EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 750, + "BaselineThroughputInMBps": 93.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 750, + "MaximumThroughputInMBps": 93.75, + "MaximumIops": 6000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "K80", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 12288 + } + } + ], + "TotalGpuMemoryInMiB": 12288 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5dn.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 300, + "Disks": [ + { + "SizeInGB": 300, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + "BaselineThroughputInMBps": 287.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6gd.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + 
"VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 474, + "Disks": [ + { + "SizeInGB": 474, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2375, + "BaselineThroughputInMBps": 296.875, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5d.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + 
"ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 300, + "Disks": [ + { + "SizeInGB": 300, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + "BaselineThroughputInMBps": 287.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5a.8xlarge", + 
"CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5dn.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2400, + "Disks": [ + { + "SizeInGB": 600, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13600, + "BaselineThroughputInMBps": 1700.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13600, + "MaximumThroughputInMBps": 1700.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + 
"partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i3en.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 5000, + "Disks": [ + { + "SizeInGB": 2500, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2307, + "BaselineThroughputInMBps": 288.3875, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5n.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6800, + "BaselineThroughputInMBps": 850.0, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6800, + "MaximumThroughputInMBps": 850.0, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + 
"Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g5.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 900, + "Disks": [ + { + "SizeInGB": 900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 16000, + "BaselineThroughputInMBps": 2000.0, + "BaselineIops": 65000, + "MaximumBandwidthInMbps": 16000, + "MaximumThroughputInMBps": 2000.0, + "MaximumIops": 65000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "A10G", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 24576 + } + } + ], + "TotalGpuMemoryInMiB": 24576 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6g.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1188, + "BaselineThroughputInMBps": 148.5, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + 
"MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m3.2xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 30720 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 160, + "Disks": [ + { + "SizeInGB": 80, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6gn.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 
1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "x1.32xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 128, + "DefaultCores": 64, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 8, + 12, + 16, + 20, + 24, + 28, + 32, + 36, + 40, + 44, + 48, + 52, + 56, + 60, + 64 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 1998848 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3840, + "Disks": [ + { + "SizeInGB": 1920, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14000, + "BaselineThroughputInMBps": 1750.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 14000, + "MaximumThroughputInMBps": 1750.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r4.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 249856 + }, 
+ "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 7000, + "BaselineThroughputInMBps": 875.0, + "BaselineIops": 37500, + "MaximumBandwidthInMbps": 7000, + "MaximumThroughputInMBps": 875.0, + "MaximumIops": 37500 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "x1e.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 124928 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 120, + "Disks": [ + { + "SizeInGB": 120, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 500, + "BaselineThroughputInMBps": 62.5, + "BaselineIops": 3700, + "MaximumBandwidthInMbps": 500, + "MaximumThroughputInMBps": 62.5, + "MaximumIops": 3700 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5n.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + 
"ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t2.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "i386", + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Low to Moderate", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Low to Moderate", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 6, + "Ipv6AddressesPerInterface": 6, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5n.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + 
"BaselineThroughputInMBps": 287.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t2.small", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "i386", + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 2048 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Low to Moderate", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Low to Moderate", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13600, + "BaselineThroughputInMBps": 1700.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13600, + "MaximumThroughputInMBps": 1700.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + 
"NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i2.8xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 249856 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 6400, + "Disks": [ + { + "SizeInGB": 800, + "Count": 8, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "inf1.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + 
"MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 11, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 11 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "InferenceAcceleratorInfo": { + "Accelerators": [ + { + "Count": 16, + "Name": "Inferentia", + "Manufacturer": "AWS" + } + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6gd.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1900, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "dl1.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.0 + }, + "VCpuInfo": { + 
"DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 4000, + "Disks": [ + { + "SizeInGB": 1000, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "4x 100 Gigabit", + "MaximumNetworkInterfaces": 60, + "MaximumNetworkCards": 4, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + }, + { + "NetworkCardIndex": 1, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + }, + { + "NetworkCardIndex": 2, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + }, + { + "NetworkCardIndex": 3, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 4 + }, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "Gaudi HL-205", + "Manufacturer": "Habana", + "Count": 8, + "MemoryInfo": { + "SizeInMiB": 32768 + } + } + ], + "TotalGpuMemoryInMiB": 262144 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5dn.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2400, + "Disks": [ + { + "SizeInGB": 600, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13600, + "BaselineThroughputInMBps": 1700.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13600, + "MaximumThroughputInMBps": 1700.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + 
"DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6gd.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 474, + "Disks": [ + { + "SizeInGB": 474, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2375, + "BaselineThroughputInMBps": 296.875, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5zn.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1564, + "BaselineThroughputInMBps": 195.5, + "BaselineIops": 6667, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13333 + }, + 
"NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t3a.nano", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 512 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 45, + "BaselineThroughputInMBps": 5.625, + "BaselineIops": 250, + "MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 2, + "Ipv6AddressesPerInterface": 2, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x1e.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 1998848 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1920, + "Disks": [ + { + "SizeInGB": 1920, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + 
"EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 7000, + "BaselineThroughputInMBps": 875.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 7000, + "MaximumThroughputInMBps": 875.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5a.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 8, + 12, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3170, + "BaselineThroughputInMBps": 396.25, + "BaselineIops": 13300, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13300 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + 
"EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + "BaselineThroughputInMBps": 287.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5d.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m3.medium", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + 
"MemoryInfo": { + "SizeInMiB": 3840 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 4, + "Disks": [ + { + "SizeInGB": 4, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 6, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "t2.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "vt1.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 6, + 12, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" 
+ }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r6i.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 15000, + "BaselineThroughputInMBps": 1875.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 15000, + "MaximumThroughputInMBps": 1875.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "18.75 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "18.75 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6gn.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + 
"EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "d3.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 23760, + "Disks": [ + { + "SizeInGB": 1980, + "Count": 12, + "Type": "hdd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2800, + "BaselineThroughputInMBps": 350.0, + "BaselineIops": 15000, + "MaximumBandwidthInMbps": 2800, + "MaximumThroughputInMBps": 350.0, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 15 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 15 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i3.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], 
+ "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 249856 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 7600, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 7000, + "BaselineThroughputInMBps": 875.0, + "BaselineIops": 32500, + "MaximumBandwidthInMbps": 7000, + "MaximumThroughputInMBps": 875.0, + "MaximumIops": 32500 + }, + "NvmeSupport": "supported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "g4dn.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 900, + "Disks": [ + { + "SizeInGB": 900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + 
"legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6gd.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 950, + "Disks": [ + { + "SizeInGB": 950, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5d.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + 
"PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "i3en.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2500, + "Disks": [ + { + "SizeInGB": 2500, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1153, + "BaselineThroughputInMBps": 144.2, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6i.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1250, + "BaselineThroughputInMBps": 156.25, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + 
"Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5dn.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "is4gen.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 12288 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1875, + "Disks": [ + { + "SizeInGB": 1875, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1250, + "BaselineThroughputInMBps": 156.25, 
+ "BaselineIops": 5000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c3.2xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.8 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 15360 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 160, + "Disks": [ + { + "SizeInGB": 80, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "im4gn.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 
30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 30000, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 40000, + "BaselineThroughputInMBps": 5000.0, + "BaselineIops": 160000, + "MaximumBandwidthInMbps": 40000, + "MaximumThroughputInMBps": 5000.0, + "MaximumIops": 160000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "im4gn.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 7500, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 10000, + "BaselineThroughputInMBps": 1250.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, 
+ "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5ad.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 150, + "Disks": [ + { + "SizeInGB": 150, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1085, + "BaselineThroughputInMBps": 135.625, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5ad.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 75, + "Disks": [ + { + "SizeInGB": 75, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 200, + "BaselineThroughputInMBps": 25.0, + "BaselineIops": 800, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13300 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + 
"EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5ad.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 600, + "Disks": [ + { + "SizeInGB": 300, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2880, + "BaselineThroughputInMBps": 360.0, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "d2.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.4 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 124928 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 24576, + "Disks": [ + { + "SizeInGB": 2048, + "Count": 12, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2000, + "BaselineThroughputInMBps": 250.0, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 2000, + "MaximumThroughputInMBps": 250.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + 
"NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5a.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5ad.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 8, + 12, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1200, + "Disks": [ + { + "SizeInGB": 600, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3170, + "BaselineThroughputInMBps": 396.25, + "BaselineIops": 13300, + 
"MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13300 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6i.32xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 128, + "DefaultCores": 64, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48, + 50, + 52, + 54, + 56, + 58, + 60, + 62, + 64 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 1048576 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 40000, + "BaselineThroughputInMBps": 5000.0, + "BaselineIops": 160000, + "MaximumBandwidthInMbps": 40000, + "MaximumThroughputInMBps": 5000.0, + "MaximumIops": 160000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5dn.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + 
"SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i3.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 72, + "DefaultCores": 36, + 
"DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 15200, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 8, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m4.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.4 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1000, + "BaselineThroughputInMBps": 125.0, + "BaselineIops": 8000, + "MaximumBandwidthInMbps": 1000, + "MaximumThroughputInMBps": 125.0, + "MaximumIops": 8000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6gn.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + 
"DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1235, + "BaselineThroughputInMBps": 154.375, + "BaselineIops": 5000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r5b.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 40000, + "BaselineThroughputInMBps": 5000.0, + "BaselineIops": 173333, + "MaximumBandwidthInMbps": 40000, + "MaximumThroughputInMBps": 5000.0, + "MaximumIops": 173333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t3.small", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + 
"SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 2048 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 174, + "BaselineThroughputInMBps": 21.75, + "BaselineIops": 1000, + "MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i3en.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1250, + "Disks": [ + { + "SizeInGB": 1250, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 576, + "BaselineThroughputInMBps": 72.1, + "BaselineIops": 3000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6a.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + 
], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 32, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 20000, + "BaselineThroughputInMBps": 2500.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 20000, + "MaximumThroughputInMBps": 2500.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "37.5 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "37.5 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "im4gn.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 937, + "Disks": [ + { + "SizeInGB": 937, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1250, + "BaselineThroughputInMBps": 156.25, + "BaselineIops": 5000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "g5.xlarge", + "CurrentGeneration": true, + 
"FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 250, + "Disks": [ + { + "SizeInGB": 250, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 700, + "BaselineThroughputInMBps": 87.5, + "BaselineIops": 3000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "A10G", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 24576 + } + } + ], + "TotalGpuMemoryInMiB": 24576 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2300, + "BaselineThroughputInMBps": 287.5, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + 
"HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5d.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 100, + "Disks": [ + { + "SizeInGB": 100, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5n.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 72, + "DefaultCores": 36, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + 
"PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6g.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 2048 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 315, + "BaselineThroughputInMBps": 39.375, + "BaselineIops": 2500, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "a1.4xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3500, + "BaselineThroughputInMBps": 437.5, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + 
"DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5dn.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 600, + "Disks": [ + { + "SizeInGB": 300, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 15000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5n.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + 
"SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m3.large", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 7680 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 32, + "Disks": [ + { + "SizeInGB": 32, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r6i.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 128, + "DefaultCores": 64, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 1048576 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 40000, + "BaselineThroughputInMBps": 5000.0, + "BaselineIops": 160000, + "MaximumBandwidthInMbps": 40000, + "MaximumThroughputInMBps": 5000.0, + "MaximumIops": 160000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + 
"BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r6gd.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48, + 49, + 50, + 51, + 52, + 53, + 54, + 55, + 56, + 57, + 58, + 59, + 60, + 61, + 62, + 63, + 64 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5d.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + 
"MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r6g.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2375, + "BaselineThroughputInMBps": 296.875, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "p3.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.7 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 499712 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14000, + "BaselineThroughputInMBps": 1750.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 14000, + "MaximumThroughputInMBps": 1750.0, + "MaximumIops": 80000 + }, + "NvmeSupport": 
"unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "V100", + "Manufacturer": "NVIDIA", + "Count": 8, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 131072 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "z1d.6xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.0 + }, + "VCpuInfo": { + "DefaultVCpus": 24, + "DefaultCores": 12, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 900, + "Disks": [ + { + "SizeInGB": 900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6gd.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 2048 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 59, + "Disks": [ + { + 
"SizeInGB": 59, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 315, + "BaselineThroughputInMBps": 39.375, + "BaselineIops": 2500, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m5ad.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2400, + "Disks": [ + { + "SizeInGB": 600, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5n.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + 
], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6i.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 128, + "DefaultCores": 64, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 40000, + "BaselineThroughputInMBps": 5000.0, + "BaselineIops": 160000, + "MaximumBandwidthInMbps": 40000, + "MaximumThroughputInMBps": 5000.0, + "MaximumIops": 160000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6g.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + 
"BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "g5g.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 4, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1188, + "BaselineThroughputInMBps": 148.5, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4g", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m6g.medium", 
+ "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 315, + "BaselineThroughputInMBps": 39.375, + "BaselineIops": 2500, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "g4dn.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 225, + "Disks": [ + { + "SizeInGB": 225, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] 
+ }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m1.small", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "i386", + "x86_64" + ] + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 1740 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 160, + "Disks": [ + { + "SizeInGB": 160, + "Count": 1, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "unsupported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Low", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Low", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6g.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + 
"HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "p2.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 749568 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 10000, + "BaselineThroughputInMBps": 1250.0, + "BaselineIops": 65000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 65000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "K80", + "Manufacturer": "NVIDIA", + "Count": 16, + "MemoryInfo": { + "SizeInMiB": 12288 + } + } + ], + "TotalGpuMemoryInMiB": 196608 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m5d.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 600, + "Disks": [ + { + "SizeInGB": 300, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 18750, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 18750 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + 
"NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "t2.nano", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "i386", + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.4 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 512 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Low to Moderate", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Low to Moderate", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 2, + "Ipv6AddressesPerInterface": 2, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m1.large", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ] + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 7680 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 840, + "Disks": [ + { + "SizeInGB": 420, + "Count": 2, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "supported", + "EncryptionSupport": "unsupported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 500, + "BaselineThroughputInMBps": 62.5, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 500, + "MaximumThroughputInMBps": 62.5, + "MaximumIops": 4000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": 
false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "d3en.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 27960, + "Disks": [ + { + "SizeInGB": 13980, + "Count": 2, + "Type": "hdd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 850, + "BaselineThroughputInMBps": 106.25, + "BaselineIops": 5000, + "MaximumBandwidthInMbps": 2800, + "MaximumThroughputInMBps": 350.0, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 3, + "Ipv6AddressesPerInterface": 3, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6a.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6666, + "BaselineThroughputInMBps": 833.333333, + "BaselineIops": 26667, + "MaximumBandwidthInMbps": 6666, + "MaximumThroughputInMBps": 833.333333, + "MaximumIops": 26667 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12.5 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12.5 Gigabit", + 
"MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6i.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2500, + "BaselineThroughputInMBps": 312.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5a.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1085, + "BaselineThroughputInMBps": 135.625, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 2880, + "MaximumThroughputInMBps": 360.0, + "MaximumIops": 16000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + 
"MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "is4gen.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 30000, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 20000, + "BaselineThroughputInMBps": 2500.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 20000, + "MaximumThroughputInMBps": 2500.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "d3en.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 335520, + "Disks": [ + { + "SizeInGB": 13980, + "Count": 24, + "Type": "hdd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + 
"EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 7000, + "BaselineThroughputInMBps": 875.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 7000, + "MaximumThroughputInMBps": 875.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "75 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6gd.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 48, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2850, + "Disks": [ + { + "SizeInGB": 1425, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14250, + "BaselineThroughputInMBps": 1781.25, + "BaselineIops": 50000, + "MaximumBandwidthInMbps": 14250, + "MaximumThroughputInMBps": 1781.25, + "MaximumIops": 50000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m6a.32xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + 
"ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 128, + "DefaultCores": 64, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 8, + 12, + 16, + 20, + 24, + 28, + 32, + 64 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 26666, + "BaselineThroughputInMBps": 3333.333333, + "BaselineIops": 100000, + "MaximumBandwidthInMbps": 26666, + "MaximumThroughputInMBps": 3333.333333, + "MaximumIops": 100000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6i.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1250, + "BaselineThroughputInMBps": 156.25, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "z1d.3xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + 
"SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 4.0 + }, + "VCpuInfo": { + "DefaultVCpus": 12, + "DefaultCores": 6, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 450, + "Disks": [ + { + "SizeInGB": 450, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "i3.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 124928 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3800, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3500, + "BaselineThroughputInMBps": 437.5, + "BaselineIops": 16000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 16000 + }, + "NvmeSupport": "supported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + 
"BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "t3a.micro", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 1024 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 90, + "BaselineThroughputInMBps": 11.25, + "BaselineIops": 500, + "MaximumBandwidthInMbps": 2085, + "MaximumThroughputInMBps": 260.625, + "MaximumIops": 11800 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 2, + "Ipv6AddressesPerInterface": 2, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5n.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + 
"DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "f1.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 249856 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 940, + "Disks": [ + { + "SizeInGB": 940, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3500, + "BaselineThroughputInMBps": 437.5, + "BaselineIops": 44000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 44000 + }, + "NvmeSupport": "supported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "FpgaInfo": { + "Fpgas": [ + { + "Name": "Virtex UltraScale (VU9P)", + "Manufacturer": "Xilinx", + "Count": 2, + "MemoryInfo": { + "SizeInMiB": 65536 + } + } + ], + "TotalFpgaMemoryInMiB": 131072 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6gd.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 32, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1900, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + 
"MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "g4dn.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 225, + "Disks": [ + { + "SizeInGB": 225, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "p3dn.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 
42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1800, + "Disks": [ + { + "SizeInGB": 900, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "V100", + "Manufacturer": "NVIDIA", + "Count": 8, + "MemoryInfo": { + "SizeInMiB": 32768 + } + } + ], + "TotalGpuMemoryInMiB": 262144 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6g.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2375, + "BaselineThroughputInMBps": 296.875, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r6i.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + 
"SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 65536 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2500, + "BaselineThroughputInMBps": 312.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g2.8xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.6 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 61440 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 240, + "Disks": [ + { + "SizeInGB": 120, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "supported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "High", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "K520", + "Manufacturer": "NVIDIA", + "Count": 4, + "MemoryInfo": { + "SizeInMiB": 4096 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, 
+ "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6g.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "d2.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.4 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 31232 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 6144, + "Disks": [ + { + "SizeInGB": 2048, + "Count": 3, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 750, + "BaselineThroughputInMBps": 93.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 750, + "MaximumThroughputInMBps": 93.75, + "MaximumIops": 6000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + 
"AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "i3en.6xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 24, + "DefaultCores": 12, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 15000, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c5a.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 200, + "BaselineThroughputInMBps": 25.0, + "BaselineIops": 800, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13300 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", 
+ "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6g.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 64, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 524288 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "r4.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 124928 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 3500, + "BaselineThroughputInMBps": 437.5, + "BaselineIops": 18750, + "MaximumBandwidthInMbps": 3500, + "MaximumThroughputInMBps": 437.5, + "MaximumIops": 18750 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + 
"BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "f1.16xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 64, + "DefaultCores": 32, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 999424 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3760, + "Disks": [ + { + "SizeInGB": 940, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14000, + "BaselineThroughputInMBps": 1750.0, + "BaselineIops": 75000, + "MaximumBandwidthInMbps": 14000, + "MaximumThroughputInMBps": 1750.0, + "MaximumIops": 75000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "FpgaInfo": { + "Fpgas": [ + { + "Name": "Virtex UltraScale (VU9P)", + "Manufacturer": "Xilinx", + "Count": 8, + "MemoryInfo": { + "SizeInMiB": 65536 + } + } + ], + "TotalFpgaMemoryInMiB": 524288 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m6a.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 16, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 10000, + "BaselineThroughputInMBps": 1250.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "18.75 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, 
+ "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "18.75 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "x1e.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 499712 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 480, + "Disks": [ + { + "SizeInGB": 480, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1750, + "BaselineThroughputInMBps": 218.75, + "BaselineIops": 10000, + "MaximumBandwidthInMbps": 1750, + "MaximumThroughputInMBps": 218.75, + "MaximumIops": 10000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6gd.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 48, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16, + 17, + 18, + 19, + 20, + 21, + 22, + 23, + 24, + 25, + 26, + 27, + 28, + 29, + 30, + 31, + 32, + 33, + 34, + 35, + 36, + 37, + 38, + 39, + 40, + 41, + 42, + 43, + 44, + 45, + 46, + 47, + 48 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 2850, + 
"Disks": [ + { + "SizeInGB": 1425, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 14250, + "BaselineThroughputInMBps": 1781.25, + "BaselineIops": 50000, + "MaximumBandwidthInMbps": 14250, + "MaximumThroughputInMBps": 1781.25, + "MaximumIops": 50000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "g5.48xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.3 + }, + "VCpuInfo": { + "DefaultVCpus": 192, + "DefaultCores": 96, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 7600, + "Disks": [ + { + "SizeInGB": 3800, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "A10G", + "Manufacturer": "NVIDIA", + "Count": 8, + "MemoryInfo": { + "SizeInMiB": 24576 + } + } + ], + "TotalGpuMemoryInMiB": 196608 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6gd.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + 
"BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 8192 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 118, + "Disks": [ + { + "SizeInGB": 118, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 630, + "BaselineThroughputInMBps": 78.75, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "m6a.48xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.6 + }, + "VCpuInfo": { + "DefaultVCpus": 192, + "DefaultCores": 96, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 8, + 12, + 16, + 20, + 24, + 28, + 32, + 64, + 96 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 40000, + "BaselineThroughputInMBps": 5000.0, + "BaselineIops": 160000, + "MaximumBandwidthInMbps": 40000, + "MaximumThroughputInMBps": 5000.0, + "MaximumIops": 160000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": 
"i3.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.3 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 4, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 62464 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 1900, + "Disks": [ + { + "SizeInGB": 1900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1700, + "BaselineThroughputInMBps": 212.5, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 1700, + "MaximumThroughputInMBps": 212.5, + "MaximumIops": 12000 + }, + "NvmeSupport": "supported" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r5dn.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 8, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 600, + "Disks": [ + { + "SizeInGB": 300, + "Count": 2, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 15000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 15000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + 
"PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5n.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "i3en.metal", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": true, + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 60000, + "Disks": [ + { + "SizeInGB": 7500, + "Count": 8, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + 
"EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "r6i.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 1, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 650, + "BaselineThroughputInMBps": 81.25, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 12.5 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + "Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6i.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 15000, + "BaselineThroughputInMBps": 1875.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 15000, + "MaximumThroughputInMBps": 1875.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "18.75 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "18.75 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": 
"required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m2.xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ] + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 17510 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 420, + "Disks": [ + { + "SizeInGB": 420, + "Count": 1, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "unsupported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c6gd.large", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 2, + "DefaultCores": 2, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 4096 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 118, + "Disks": [ + { + "SizeInGB": 118, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 630, + "BaselineThroughputInMBps": 78.75, + "BaselineIops": 3600, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 3 + } + ], + "Ipv4AddressesPerInterface": 10, + 
"Ipv6AddressesPerInterface": 10, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5d.9xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 36, + "DefaultCores": 18, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 73728 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 900, + "Disks": [ + { + "SizeInGB": 900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m6g.2xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 8, + "DefaultCores": 8, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 2375, + "BaselineThroughputInMBps": 296.875, + "BaselineIops": 12000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + 
"DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "c5n.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 10752 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 1150, + "BaselineThroughputInMBps": 143.75, + "BaselineIops": 6000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "c6i.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 98304 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 15000, + "BaselineThroughputInMBps": 1875.0, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 15000, + "MaximumThroughputInMBps": 1875.0, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "18.75 Gigabit", + "MaximumNetworkInterfaces": 8, 
+ "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "18.75 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "is4gen.medium", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 6144 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 937, + "Disks": [ + { + "SizeInGB": 937, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 625, + "BaselineThroughputInMBps": 78.125, + "BaselineIops": 2500, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 25 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "cc2.8xlarge", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.6 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2 + }, + "MemoryInfo": { + "SizeInMiB": 61952 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3360, + "Disks": [ + { + "SizeInGB": 840, + "Count": 4, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "unsupported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 
8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "c5.18xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.4 + }, + "VCpuInfo": { + "DefaultVCpus": 72, + "DefaultCores": 36, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 147456 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "25 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "p3.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.7 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 249856 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 7000, + "BaselineThroughputInMBps": 875.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 7000, + "MaximumThroughputInMBps": 875.0, + 
"MaximumIops": 40000 + }, + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "supported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "V100", + "Manufacturer": "NVIDIA", + "Count": 4, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 65536 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "m1.medium", + "CurrentGeneration": false, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs", + "instance-store" + ], + "SupportedVirtualizationTypes": [ + "hvm", + "paravirtual" + ], + "BareMetal": false, + "Hypervisor": "xen", + "ProcessorInfo": { + "SupportedArchitectures": [ + "i386", + "x86_64" + ] + }, + "VCpuInfo": { + "DefaultVCpus": 1, + "DefaultCores": 1, + "DefaultThreadsPerCore": 1 + }, + "MemoryInfo": { + "SizeInMiB": 3788 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 410, + "Disks": [ + { + "SizeInGB": 410, + "Count": 1, + "Type": "hdd" + } + ], + "NvmeSupport": "unsupported" + }, + "EbsInfo": { + "EbsOptimizedSupport": "unsupported", + "EncryptionSupport": "unsupported", + "NvmeSupport": "unsupported" + }, + "NetworkInfo": { + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Moderate", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 6, + "Ipv6AddressesPerInterface": 0, + "Ipv6Supported": false, + "EnaSupport": "unsupported", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios" + ] + }, + { + "InstanceType": "u-9tb1.112xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.1 + }, + "VCpuInfo": { + "DefaultVCpus": 448, + "DefaultCores": 224, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 8, + 16, + 24, + 32, + 40, + 48, + 56, + 64, + 72, + 80, + 88, + 96, + 104, + 112, + 120, + 128, + 136, + 144, + 152, + 160, + 168, + 176, + 184, + 192, + 200, + 208, + 216, + 224 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 9437184 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + 
"EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5a.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 12, + 18, + 24, + 36, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 393216 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 13750, + "BaselineThroughputInMBps": 1718.75, + "BaselineIops": 60000, + "MaximumBandwidthInMbps": 13750, + "MaximumThroughputInMBps": 1718.75, + "MaximumIops": 60000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "20 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r6i.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 262144 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": 
"default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 10000, + "BaselineThroughputInMBps": 1250.0, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 10000, + "MaximumThroughputInMBps": 1250.0, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "12.5 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "12.5 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "m5a.12xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 48, + "DefaultCores": 24, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 6, + 12, + 18, + 24 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 196608 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 6780, + "BaselineThroughputInMBps": 847.5, + "BaselineIops": 30000, + "MaximumBandwidthInMbps": 6780, + "MaximumThroughputInMBps": 847.5, + "MaximumIops": 30000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "10 Gigabit", + "MaximumNetworkInterfaces": 8 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g4dn.8xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 32, + "DefaultCores": 16, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 131072 + }, + "InstanceStorageSupported": true, + 
"InstanceStorageInfo": { + "TotalSizeInGB": 900, + "Disks": [ + { + "SizeInGB": 900, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 9500, + "BaselineThroughputInMBps": 1187.5, + "BaselineIops": 40000, + "MaximumBandwidthInMbps": 9500, + "MaximumThroughputInMBps": 1187.5, + "MaximumIops": 40000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "50 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g5g.4xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "arm64" + ], + "SustainedClockSpeedInGhz": 2.5 + }, + "VCpuInfo": { + "DefaultVCpus": 16, + "DefaultCores": 16, + "DefaultThreadsPerCore": 1, + "ValidCores": [ + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 11, + 12, + 13, + 14, + 15, + 16 + ], + "ValidThreadsPerCore": [ + 1 + ] + }, + "MemoryInfo": { + "SizeInMiB": 32768 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 4750, + "BaselineThroughputInMBps": 593.75, + "BaselineIops": 20000, + "MaximumBandwidthInMbps": 4750, + "MaximumThroughputInMBps": 593.75, + "MaximumIops": 20000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 8, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 30, + "Ipv6AddressesPerInterface": 30, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "T4g", + "Manufacturer": "NVIDIA", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 16384 + } + } + ], + "TotalGpuMemoryInMiB": 16384 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "uefi" + ] + }, + { + "InstanceType": "t3a.xlarge", + 
"CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 2.2 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": false, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 695, + "BaselineThroughputInMBps": 86.875, + "BaselineIops": 4000, + "MaximumBandwidthInMbps": 2780, + "MaximumThroughputInMBps": 347.5, + "MaximumIops": 15700 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 5 Gigabit", + "MaximumNetworkInterfaces": 4 + } + ], + "Ipv4AddressesPerInterface": 15, + "Ipv6AddressesPerInterface": 15, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": false + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "partition", + "spread" + ] + }, + "HibernationSupported": true, + "BurstablePerformanceSupported": true, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": true, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "r5dn.24xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.1 + }, + "VCpuInfo": { + "DefaultVCpus": 96, + "DefaultCores": 48, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2, + 4, + 6, + 8, + 10, + 12, + 14, + 16, + 18, + 20, + 22, + 24, + 26, + 28, + 30, + 32, + 34, + 36, + 38, + 40, + 42, + 44, + 46, + 48 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 786432 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 3600, + "Disks": [ + { + "SizeInGB": 900, + "Count": 4, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 19000, + "BaselineThroughputInMBps": 2375.0, + "BaselineIops": 80000, + "MaximumBandwidthInMbps": 19000, + "MaximumThroughputInMBps": 2375.0, + "MaximumIops": 80000 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "100 Gigabit", + "MaximumNetworkInterfaces": 15 + } + ], + "Ipv4AddressesPerInterface": 50, + "Ipv6AddressesPerInterface": 50, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": true, + "EfaInfo": { + "MaximumEfaInterfaces": 1 + }, + "EncryptionInTransitSupported": true + }, + "PlacementGroupInfo": { + 
"SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": true, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + }, + { + "InstanceType": "g4ad.xlarge", + "CurrentGeneration": true, + "FreeTierEligible": false, + "SupportedUsageClasses": [ + "on-demand", + "spot" + ], + "SupportedRootDeviceTypes": [ + "ebs" + ], + "SupportedVirtualizationTypes": [ + "hvm" + ], + "BareMetal": false, + "Hypervisor": "nitro", + "ProcessorInfo": { + "SupportedArchitectures": [ + "x86_64" + ], + "SustainedClockSpeedInGhz": 3.0 + }, + "VCpuInfo": { + "DefaultVCpus": 4, + "DefaultCores": 2, + "DefaultThreadsPerCore": 2, + "ValidCores": [ + 2 + ], + "ValidThreadsPerCore": [ + 1, + 2 + ] + }, + "MemoryInfo": { + "SizeInMiB": 16384 + }, + "InstanceStorageSupported": true, + "InstanceStorageInfo": { + "TotalSizeInGB": 150, + "Disks": [ + { + "SizeInGB": 150, + "Count": 1, + "Type": "ssd" + } + ], + "NvmeSupport": "required" + }, + "EbsInfo": { + "EbsOptimizedSupport": "default", + "EncryptionSupport": "supported", + "EbsOptimizedInfo": { + "BaselineBandwidthInMbps": 400, + "BaselineThroughputInMBps": 50.0, + "BaselineIops": 1700, + "MaximumBandwidthInMbps": 3170, + "MaximumThroughputInMBps": 396.25, + "MaximumIops": 13333 + }, + "NvmeSupport": "required" + }, + "NetworkInfo": { + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2, + "MaximumNetworkCards": 1, + "DefaultNetworkCardIndex": 0, + "NetworkCards": [ + { + "NetworkCardIndex": 0, + "NetworkPerformance": "Up to 10 Gigabit", + "MaximumNetworkInterfaces": 2 + } + ], + "Ipv4AddressesPerInterface": 4, + "Ipv6AddressesPerInterface": 4, + "Ipv6Supported": true, + "EnaSupport": "required", + "EfaSupported": false, + "EncryptionInTransitSupported": true + }, + "GpuInfo": { + "Gpus": [ + { + "Name": "Radeon Pro V520", + "Manufacturer": "AMD", + "Count": 1, + "MemoryInfo": { + "SizeInMiB": 8192 + } + } + ], + "TotalGpuMemoryInMiB": 8192 + }, + "PlacementGroupInfo": { + "SupportedStrategies": [ + "cluster", + "partition", + "spread" + ] + }, + "HibernationSupported": false, + "BurstablePerformanceSupported": false, + "DedicatedHostsSupported": false, + "AutoRecoverySupported": false, + "SupportedBootModes": [ + "legacy-bios", + "uefi" + ] + } +] diff --git a/scripts/docker_images.py b/scripts/docker_images.py new file mode 100644 index 00000000..2d80ad7e --- /dev/null +++ b/scripts/docker_images.py @@ -0,0 +1,783 @@ +import datetime +import json +import os +from os import environ +from pathlib import Path + +import questionary +import requests +from rich import print +from rich.pretty import pprint + +from lib import cmd +from lib.config.config import load_envs +from lib.config_files import load_file, load_yaml, secret_value_from_yaml, write_yaml +from scripts.k8s_utils import get_env_namespaces, namespace_release +from scripts.releases import active_releases, all_releases + +from . import github + +VALID_PROFILES = [ + "base", + "dbt-snowflake", + "dbt-redshift", + "dbt-bigquery", + "dbt-databricks", +] + +# Right now, only Airflow has a "local" version. We don't want to apply +# these to everything else. +VALID_LOCAL_PROFILES = [ + "base-local", + "dbt-snowflake-local", + "dbt-redshift-local", + "dbt-bigquery-local", + "dbt-databricks-local", +] + +# The local profiles are no longer in use, so commenting out this disables +# them. 
+HAS_LOCAL_PROFILES = [] # "airflow/airflow", + +ARM64_IMAGES = sorted({"core/rabbitmq", "prometheus/kube-webhook-certgen"}) + +COMMON_REQS_IMAGES = { + "code-server/code-server": "/profiles/{profile}/python/", + "code-server/dbt-core-interface": "/profiles/{profile}/", + "ci/basic": "/profiles/{profile}/", + "ci/airflow": "/profiles/{profile}/", + "airflow/airflow": "/profiles/{profile}/", +} + +COMMON_ADAPTERS_APP = { + "code-server/code-server": "/datacoves/", + "code-server/dbt-core-interface": "/src/bin/", + "ci/basic": "/", + "ci/airflow": "/", + "airflow/airflow": "/", +} + +COMMON_AIRFLOW_PROVIDERS = { + "ci/airflow": "/", + "airflow/airflow": "/", +} + +ROOT_REQS_IMAGES = {"ci/multiarch": "/requirements.txt"} + +GITHUB_ACTION_IMAGES = { + # Image references removed since balboa's dbt project is now using the latest major tag + # instead of specific ones. We kept the feature in case in the future we use it again + # + # "ci-airflow-dbt-snowflake": { + # "used_by": {"owner": "datacoves", "name": "balboa", "default_branch": "main"}, + # }, + # "ci-basic-dbt-snowflake": { + # "used_by": {"owner": "datacoves", "name": "balboa", "default_branch": "main"}, + # }, +} + +# Images that could be used as a starting point by others. These images will have additional tags +EXTENSIBLE_IMAGES = [ + "ci-basic", + "ci-airflow", + "airflow-airflow", +] + +# These are the different lists of images that we have in a release. +# Each one of these is a key name for the release manifest yaml which +# is a list of images. +# +# We also have 'images' and 'ci_images' which are dictionaries mapping +# image names to versions. Those are not on this list because they are +# handled differently. +RELEASE_IMAGE_KEYS = ( + "airbyte_images", + "airflow_images", + "superset_images", + "datahub_images", + "elastic_images", + "kafka_images", + "neo4j_images", + "postgresql_images", + "observability_images", + "core_images", +) + + +def latest_version_tags(version, repos, name=None): + pswd = environ.get("DOCKER_PASSWORD") or secret_value_from_yaml( + Path("secrets/cli.secret.yaml"), "docker_password" + ) + images = {} + + major = version.split(".")[0] + + for repo in repos: + print(f"Getting tags for {repo}...") + + tags_response = json.loads( + cmd.output("scripts/shell/docker_tags.sh", pswd, repo) + ) + tags = tags_response["tags"] + major_version_tags = sorted( + [ + tag + for tag in tags + if tag.startswith(major + ".") or (name and tag.startswith(name + ".")) + ] + ) + images[repo] = major_version_tags[-1] if major_version_tags else version + + return images + + +def _repos_including_profiles(dcprefix, paths): + """ + Given a list of image paths, return a list of docker image repos including profiles. + # For example, if the path is "code-server/code-server", we'll add "code-server/code-server/base" + # and "src/datacoves/dbt-snowflake/dbt-snowflake" to the list. 
+ """ + new_paths = [] + for path in paths: + if path in COMMON_REQS_IMAGES: + for profile in VALID_PROFILES: + new_paths.append(path + "/" + profile) + + if path in HAS_LOCAL_PROFILES: + for profile in VALID_LOCAL_PROFILES: + new_paths.append(path + "/" + profile) + else: + new_paths.append(path) + return [dcprefix + path.replace("/", "-") for path in sorted(new_paths)] + + +def _private_image_paths(): + private_image_paths = [ + str(s)[len("src/") : -len("/Dockerfile")] + for s in Path().glob("src/*/*/Dockerfile") + if "src/ci" not in str(s) + ] + return private_image_paths + + +def repos_from_paths(dcprefix="datacovesprivate/"): + image_paths = _private_image_paths() + return _repos_including_profiles(dcprefix, image_paths) + + +def _public_image_paths(): + return [ + str(s)[len("src/") : -len("/Dockerfile")] + for s in Path().glob("src/*/*/Dockerfile") + if "src/ci" in str(s) and "src/ci/multiarch/" not in str(s) + ] + + +def public_repos_from_paths(dcprefix="datacoves/"): + return _repos_including_profiles(dcprefix, _public_image_paths()) + + +def images_from_paths(default_tag, dcprefix="datacovesprivate/"): + repos = repos_from_paths(dcprefix=dcprefix) + return {repo: default_tag for repo in repos} + + +def parse_ci_image_list(release_images): + image_list = [] + for image_name, tag in release_images.items(): + image_list.append(f"{image_name}:{tag}") + if any( + [image_name.split("/")[-1].startswith(ext) for ext in EXTENSIBLE_IMAGES] + ): + version = tag.split("-")[0] + major_minor = ".".join(version.split(".")[:2]) + major = major_minor.split(".")[0] + image_list.append(f"{image_name}:{major}") + image_list.append(f"{image_name}:{major_minor}") + + return image_list + + +def release_images(release_name, exclude_patterns=None): + """Returns a release's docker images.""" + release_file = Path("releases") / (release_name + ".yaml") + assert ( + release_file.exists() + ), f"Release '{release_file}' referenced by environment not found" + release = load_yaml(release_file) + images = [f"{repo}:{tag}" for repo, tag in release["images"].items()] + + for image_key in RELEASE_IMAGE_KEYS: + images += release.get(image_key, []) + + dc_images = release["images"].copy() + dc_images.update(release["ci_images"]) + + images += parse_ci_image_list(dc_images) + + if exclude_patterns: + filtered = [] + for image in images: + # If any of the exclude_patterns matches, don't add the image to the list. + if not any(pattern in image for pattern in exclude_patterns): + filtered.append(image) + images = filtered + + return images + + +def make_new_release_from_old( + old_release: str, new_release: str, commit: str, change_versions: dict +): + """This takes an old release, such as "3.12345", and a new release, + such as "3.64321", and a dictionary mapping image names to version + numbers. The new release will be created, using the images from the + old release and replacing any versions in 'change_version', thus + allowing a hotfix. The resulting release is pushed up to GitHub. + + 'commit' will probably always be the output from: + cmd.output("git rev-parse HEAD").strip() + + This 'seems' like it belongs in 'releases', however this file depends + on both 'releases' and 'RELEASE_IMAGE_KEYS' from this module. If + I put this code in 'releases', I'll make a circular dependency. + So it's gotta go in here. Sorry! 
+ """ + + # Load old release, alter images, then save to new release + new_release_yaml = load_yaml(f"releases/{old_release}.yaml") + + # Change commit + new_release_yaml["commit"] = commit + new_release_yaml["name"] = new_release + + # Check for images in certain keys + for key in RELEASE_IMAGE_KEYS + ( + "ci_images", + "images", + ): + if key in new_release_yaml: + if isinstance(new_release_yaml[key], list): + for i in range(0, len(new_release_yaml[key])): + (image, version) = new_release_yaml[key][i].split(":") + + if image in change_versions: + new_release_yaml[key][i] = f"{image}:{change_versions[image]}" + + else: # dict + for image in new_release_yaml[key].keys(): + if image in change_versions: + new_release_yaml[key][image] = change_versions[image] + + # We should have a finished new_release_yaml, let's save it. + write_yaml(f"releases/{new_release}.yaml", new_release_yaml) + + # Upload it + github.Releaser().create_release( + new_release, new_release_yaml["commit"], is_prerelease=False + ) + + +def replacable_images(release_name: str) -> tuple: + """Replacable images are images we can replace in a hotfix. There are + two flavors of replacable images; images we build and images we use + from elsewhere. As such, this returns two lists in a tuple. + + The first list will be a list of images that we are able to build. + The second list will be images that we don't build, but might want to + change the version number on it for a hotfix release or other purposes. + """ + + images_in_release = release_images(release_name) + + # Grab dictionary of all buildables. The keys will be a set of available + # buildable images. + all_buildable = get_buildable_image_map() + + # Split 'images_in_release' into buildable vs. other + buildable = set() + other = set() + + for image in images_in_release: + (image_name, version) = image.split(":") + + if image_name in all_buildable: + buildable.add(image_name) + else: + other.add(image_name) + + return (sorted(buildable), sorted(other)) + + +def releases_images(releases): + return {image for release in releases for image in release_images(release)} + + +def environment_images(cluster_domain, env, exclude=None): + """Returns an environment's docker images.""" + cluster_yaml = load_file(f"config/{cluster_domain}/cluster-params.yaml") + images = release_images(env["release"], exclude_patterns=exclude) + images += cluster_yaml.get("extra_images", []) + return images + + +def cluster_images(cluster_domain, exclude=None): + """Returns a cluster's docker images, from all the cluster's environments.""" + cluster_params = load_file(f"config/{cluster_domain}/cluster-params.yaml") + envs = load_envs(cluster_domain) + images = set( + cluster_params.get("extra_images", []) + + release_images(cluster_params["release"], exclude_patterns=exclude) + ) + for env in envs.values(): + for image in environment_images(cluster_domain, env, exclude=exclude): + images.add(image) + return images + + +def current_images(exclude=None): + """Returns the list of already installed images in the cluster""" + releases = set() + core_release = namespace_release("core") + if core_release: + releases.add(core_release) + for env in get_env_namespaces(): + env_release = namespace_release(env) + if env_release: + releases.add(env_release) + images = set() + for release in releases: + for image in release_images(release, exclude_patterns=exclude): + images.add(image) + return images + + +def get_extra_tags(images): + """Returns extra images if extensible images found""" + extra_images = set() + 
for image in images: + name, tag = image.split(":") + repo_name = name.split("/")[-1] + if any([repo_name.startswith(ext) for ext in EXTENSIBLE_IMAGES]): + # latest tag + extra_images.add(f"{name}:latest") + # major.minor.patch version tag + version = tag.split("-")[0] + extra_images.add(f"{name}:{version}") + # major.minor version tag + major_minor = ".".join(version.split(".")[:2]) + extra_images.add(f"{name}:{major_minor}") + # major version tag + major = major_minor.split(".")[0] + extra_images.add(f"{name}:{major}") + return extra_images + + +def deploy_images(cluster_domain, source_repo=""): + """Pull docker images from datacoves docker hub repo, retags them and pushes them to the cluster registry configured in {cluster_domain}""" + cluster_params = load_file(f"config/{cluster_domain}/cluster-params.yaml") + target_registry = cluster_params.get("docker_registry") + exclude = cluster_params.get("exclude_image_patterns") + if not target_registry: + print(f"Aborting, {cluster_domain} is not using a custom container registry.") + return + + images = cluster_images(cluster_domain, exclude=exclude) - current_images( + exclude=exclude + ) + + extra_images = get_extra_tags(images) + + images = sorted(images.union(extra_images)) + + if not images: + print("No images to deploy, current cluster already using the release images.") + return + + print("Images to pull and push:") + pprint(images, expand_all=True) + if not questionary.confirm("Confirm?").ask(): + return + + _login(source_repo) + _pull_images(images, source_repo) + + _retag_images(images, source_repo, target_registry) + + _login(target_registry) + _push_images(images, target_registry) + + +def _pull_images(images, repo=None): + prefix = f"{repo}/" if repo else "" + for image in images: + cmd.sh(f"docker pull {prefix}{image}") + + +def _push_images(images, repo=None): + prefix = f"{repo}/" if repo else "" + for image in images: + try: + cmd.sh(f"docker push {prefix}{image}") + except Exception: + pass + + +def _retag_images(images, source_repo, target_repo): + src = f"{source_repo}/" if source_repo else "" + tgt = f"{target_repo}/" if target_repo else "" + for image in images: + cmd.sh(f"docker tag {src}{image} {tgt}{image}") + print(f"tagged {src}{image} -> {tgt}{image}") + + +def _login(registry): + print(f"Login to {registry} docker registry...") + cmd.sh(f"docker login {registry}") + + +def requires_target(image_path): + """Returns True if the image requires a target profile""" + return image_path in COMMON_REQS_IMAGES + + +def build_and_push( + image_path, + repo="", + target=None, + push=True, + major_minor=None, + custom_tag=None, + gen_latest=False, +) -> str: + """Build images locally and tags them using the content of the + .version.yml file. 
Returns the full version tag.""" + if not major_minor: + major_minor = load_yaml(".version.yml")["version"] + sha1 = cmd.output("git rev-parse HEAD")[:8] + patch = datetime.datetime.now(datetime.timezone.utc).strftime("%Y%m%d%H%M") + + _copy_common_files(image_path, target) + image = image_path.replace("/", "-") + target_arg = "" + if target: + image += f"-{target}" + target_arg = f" --target {target}" + if repo: + repo += "/" + version = f"{major_minor}.{patch}" + sha1_tag = f"{repo}{image}:{version}-{sha1}" + tags_str = _build_image_tags( + repo, custom_tag, sha1, patch, image, sha1_tag, gen_latest + ) + push_flag = "--push" if push else "--load" + + platform = "linux/amd64" + if image_path in ARM64_IMAGES: + platform += ",linux/arm64" + + command = ( + "echo $DOCKER_PASSWORD | docker login --username datacovesprivate --password-stdin ; " + + "docker context create tls-environment ; " + + "docker buildx create --use tls-environment ; " + + f"docker buildx build src/{image_path}/ {tags_str}{target_arg} --platform={platform} {push_flag} --provenance=false" + ) + + print(command) + cmd.sh(command, env=os.environ.copy()) + + if image in GITHUB_ACTION_IMAGES: + update_github(repo, image, sha1_tag) + + return tags_str.split(":", 1)[1] + + +def build_and_push_images(images: list, target=None, custom_tag: str = None) -> dict: + """Takes a list of image names without version numbers, of a format + such as: + + somerepo/imagename + + ... and figures out how to build them. Image name may have a profile + as part of it. + + This is, essentially, build_and_push but by a list of images rather + than by path. + + target and custom_tag are passed as-is into build_and_push + + This then returns a dictionary mapping image name to version + """ + + all_images = get_buildable_image_map() + + # Do a validation pass + for image in images: + if image not in all_images: + raise RuntimeError(f"Image {image} not recognized as a buildable image") + + # Map of image name to new version created + new_images = {} + + # Extensibles to publish + extensible_groups = set() + + # Now build them + for image in images: + (repo, image_name) = image.split("/", 1) + new_images[image] = build_and_push( + all_images[image]["image"], + repo=repo, + target=( + all_images[image]["profile"] if all_images[image]["profile"] else None + ), + push=True, + custom_tag=custom_tag, + major_minor=None, + ) + + # Do we need to build the 'extensible' extra tags? + for ei in EXTENSIBLE_IMAGES: + if image_name.startswith(ei): + # Yes, yes we do. + extensible_groups.add(ei) + + # Extensible images + if extensible_groups: + for group in extensible_groups: + publish_extensible_images(group, "False", False) + + return new_images + + +def _copy_common_files(image_path, target): + """ + Copies common files to the image directory. + """ + common_reqs_path = COMMON_REQS_IMAGES.get(image_path) + + if common_reqs_path: + if not target: + print(f"Aborting, {image_path} requires specifying a target profile.") + exit() + + # If this is a "local" profile, let's copy its non-local equivalent + # file first. 
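+ # e.g. a target of "dbt-snowflake-local" (illustrative profile name) resolves to
+ # source "dbt-snowflake", so src/common/requirements/dbt-snowflake.txt gets copied.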
+ source = target + + if target in VALID_LOCAL_PROFILES: + # Strip -local off the end + source = target[:-6] + + if target != "base": + cmd.run( + f"cp src/common/requirements/base.txt " + f"src/{image_path}{common_reqs_path.format(profile=target)}" + ) + cmd.run( + f"cp src/common/requirements/{source}.txt " + f"src/{image_path}{common_reqs_path.format(profile=target)}" + ) + print("Common requirements copied successfully.") + + common_adapters_app = COMMON_ADAPTERS_APP.get(image_path) + if common_adapters_app: + cmd.run( + f"cp src/common/set_adapters_app.sh src/{image_path}{common_adapters_app}" + ) + print("set_adapters_app.sh copied successfully.") + + common_airflow_providers = COMMON_AIRFLOW_PROVIDERS.get(image_path) + if common_airflow_providers: + cmd.run(f"cp -r src/common/providers src/{image_path}{common_adapters_app}") + print("providers copied successfully.") + cmd.run( + f"cp -r src/common/plugins/ src/{image_path}{common_adapters_app}/plugins" + ) + print("Airflow plugins copied successfully") + + root_reqs_path = ROOT_REQS_IMAGES.get(image_path) + if root_reqs_path: + cmd.run(f"cp requirements.txt src/{image_path}{root_reqs_path}") + print("root requirements copied successfully.") + + +def _build_image_tags(repo, custom_tag, sha1, patch, image, sha1_tag, gen_latest=False): + """Returns image tags to be used in new built image""" + tag = f"{repo}{image}:{custom_tag}.{patch}-{sha1}" if custom_tag else sha1_tag + if gen_latest: + tag_latest = tag.split(":")[0] + tag_latest = f"{tag_latest}:latest" + return f"--tag {tag} --tag {tag_latest}" + + return f"--tag {tag}" + + +def update_github(repo, image, image_tag): + """Updates github action""" + action_image = GITHUB_ACTION_IMAGES.get(image) + action_usage_repo = action_image.get("used_by") + + github.ImageReferenceUpdater(action_usage_repo).update_usage( + action_usage_repo.get("default_branch"), + f"feat/update-{image}", + f"{repo}{image}", + image_tag, + ) + + +def ci_build(*args, **kwargs): + build_and_push(*args, **kwargs, push=False) + + +def gc_images( + registry, token, cluster_domain_suffix_mask="", repository="", dry_run=False +): + registries = ("hub.docker.com", "taqy-docker.artifactrepo.jnj.com") + if registry not in registries: + print("Unknown registry. 
Use one of:", registries) + return + + all_images = releases_images(all_releases()) + active_images = releases_images(active_releases(cluster_domain_suffix_mask)) + unused_images = all_images - active_images + for image in sorted(unused_images): + repo, tag = image.split(":") + if not repo.startswith("datacovesprivate/"): + continue + if repository and repository != repo: + continue + print(image) + if not dry_run: + delete_image_from_registry(registry, token, repo, tag) + + +def active_image_tags(cluster_domain_suffix_mask="", repository=""): + active_images = releases_images(active_releases(cluster_domain_suffix_mask)) + for image in sorted(active_images): + repo, tag = image.split(":") + if repository and repository != repo: + continue + print(image) + + +def delete_image_from_registry(registry, token, repo, tag): + response = requests.delete( + f"https://{registry}/v2/repositories/{repo}/tags/{tag}/", + headers={ + "Accept": "application/json", + "Content-Type": "application/json", + "X-DOCKER-API-CLIENT": "docker-hub/1532.0.0", + "Authorization": f"Bearer {token}", + }, + ) + if response.status_code not in (204, 404): + raise Exception( + f"delete_image_from_registry: request failed with status code {response.status_code}" + ) + + +def publish_extensible_images(group, local_version, update_latest=True): + version = str(load_yaml(".version.yml")["version"]) + repos = public_repos_from_paths() + repos_from_paths() + + repos = [ + repo + for repo in repos + if any( + [ + repo.split("/")[-1].startswith(ext) + for ext in filter(lambda x: x == group, EXTENSIBLE_IMAGES) + ] + ) + ] + + if local_version == "True": + repos = [x for x in repos if x.endswith("-local")] + else: + repos = [x for x in repos if not x.endswith("-local")] + + images = latest_version_tags(version, repos) + + print("Images to pull:") + pprint(images, expand_all=True) + command = "echo $DOCKER_PASSWORD | docker login --username datacovesprivate --password-stdin" + print(command) + cmd.sh(command, env=os.environ.copy()) + _pull_images([f"{image}:{tag}" for image, tag in images.items()]) + + for image, tag in images.items(): + major, minor, patch = tag.split("-")[0].split(".") + image_major = f"{image}:{major}" + image_minor = f"{image}:{major}.{minor}" + image_patch = f"{image}:{major}.{minor}.{patch}" + image_latest = f"{image}:latest" + print(f"docker tag {image}:{tag} {image_major}") + cmd.sh(f"docker tag {image}:{tag} {image_major}") + print(f"docker tag {image}:{tag} {image_minor}") + cmd.sh(f"docker tag {image}:{tag} {image_minor}") + print(f"docker tag {image}:{tag} {image_patch}") + cmd.sh(f"docker tag {image}:{tag} {image_patch}") + + if update_latest: + print(f"docker tag {image}:{tag} {image_latest}") + cmd.sh(f"docker tag {image}:{tag} {image_latest}") + + print( + f"tagged {image} -> {major}, {major}.{minor}, {major}.{minor}.{patch}" + + (", and latest" if update_latest else "") + ) + cmd.sh(f"docker push {image_major}") + cmd.sh(f"docker push {image_minor}") + cmd.sh(f"docker push {image_patch}") + + if update_latest: + cmd.sh(f"docker push {image_latest}") + + +def get_all_images_build_args(): + """ + Returns list of all images to build + """ + + def args_builder(paths, repo="datacovesprivate"): + build_args = [] + for path in paths: + if path in COMMON_REQS_IMAGES: + for profile in VALID_PROFILES: + build_args.append({"repo": repo, "image": path, "profile": profile}) + + if path in HAS_LOCAL_PROFILES: + for profile in VALID_LOCAL_PROFILES: + build_args.append( + {"repo": repo, "image": path, "profile": 
profile} + ) + + else: + build_args.append({"repo": repo, "image": path, "profile": ""}) + return build_args + + return args_builder(_private_image_paths()) + args_builder( + _public_image_paths(), repo="datacoves" + ) + + +def get_buildable_image_map(): + """Generates a dictionary mapping buildable image names to the build + information from get_all_images_build_args which will be a dictionary + having keys 'repo', 'image', and 'profile' which together can be used + to build an image. + """ + + all_buildable_images = get_all_images_build_args() + buildable_images_to_paths = {} + + for image in all_buildable_images: + image_name = f"{image['repo']}/{image['image'].replace('/', '-')}" + + if image["profile"]: + image_name += f"-{image['profile']}" + + buildable_images_to_paths[image_name] = image + + return buildable_images_to_paths diff --git a/scripts/docker_images_test.py b/scripts/docker_images_test.py new file mode 100644 index 00000000..a8c012e8 --- /dev/null +++ b/scripts/docker_images_test.py @@ -0,0 +1,41 @@ +import unittest +from unittest.mock import patch + +from scripts.docker_images import release_images + + +class TestReleaseImages(unittest.TestCase): + @patch("scripts.docker_images.load_yaml") # Mocking the load_yaml function + def test_release_images(self, mock_load_yaml): + mock_load_yaml.return_value = { + "images": {"ci-test-image": "2.0.202309271350"}, + "airbyte_images": [], + "airflow_images": [], + "superset_images": [], + "observability_images": [], + "core_images": [], + "ci_images": { + "ci-multiarch": "1.0.202309271350", # Not in EXTENSIBLE_IMAGES + "ci-basic": "1.1.202309271350", # Is in EXTENSIBLE_IMAGES + "datacoves/ci-airflow": "2.1.202309271350", # Is in EXTENSIBLE_IMAGES + }, + } + + release_name = "2.0.202309271350" + exclude_patterns = None + result = release_images(release_name, exclude_patterns) + + expected_result = set( + [ + "ci-test-image:2.0.202309271350", + "ci-multiarch:1.0.202309271350", + "ci-basic:1.1.202309271350", + "ci-basic:1", + "ci-basic:1.1", + "datacoves/ci-airflow:2.1.202309271350", + "datacoves/ci-airflow:2.1", + "datacoves/ci-airflow:2", + ] + ) + + self.assertEqual(set(result), expected_result) diff --git a/scripts/dump_database.py b/scripts/dump_database.py new file mode 100644 index 00000000..e5f1b7a6 --- /dev/null +++ b/scripts/dump_database.py @@ -0,0 +1,33 @@ +from datetime import datetime +from pathlib import Path + +from lib.config import config as the +from scripts import console, k8s_utils + + +def dump_database(cluster_domain: str): + """ + Dump chosen cluster's API database to a file. 
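+
+ Runs `./manage.py dumpdata` inside the running "api" pod of the "core"
+ namespace; for now the resulting JSON dump is always written locally under db_dumps/.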
+ """ + params_yaml_path = f"config/{cluster_domain}/cluster-params.yaml" + the.load_cluster_params(params_yaml_path) + api_pod = k8s_utils.pod_for_deployment("core", "api") + if api_pod: + run_in_api_pod = k8s_utils.cmd_runner_in_pod( + "core", api_pod, capture_output=True, container="api" + ) + dumped_data = run_in_api_pod("./manage.py dumpdata", encoding="utf-8") + if dumped_data: + # TODO: Make user pick a destination: local / Bastion / s3 + write_local(dumped_data) + + +def write_local(data): + Path("db_dumps").mkdir(parents=True, exist_ok=True) + now = datetime.now() + dump_file_path = ( + f"db_dumps/{the.config['release']}_{now.strftime('%Y%m%d_%H%M%S')}.json" + ) + with open(dump_file_path, "w") as f: + f.write(data) + console.print_title(f"Database dump saved to {dump_file_path}") diff --git a/scripts/estimates/__init__.py b/scripts/estimates/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/scripts/estimates/eks_nodes.py b/scripts/estimates/eks_nodes.py new file mode 100644 index 00000000..8e6f189d --- /dev/null +++ b/scripts/estimates/eks_nodes.py @@ -0,0 +1,104 @@ +import json + +# INSTANCE SPECS ############################################################### + +max_eks_nodes_per_cluster = 2000 +max_volume_attachments = 26 +max_attachments = 28 + + +# aws ec2 describe-instance-types --filters "Name=instance-type,Values=*" --query "InstanceTypes[]" --output json > ec2-instance-types.json # noqa +with open("scripts/data/ec2-instance-types.json", "r") as f: + instance_types = json.load(f) + + +def max_eni(instance_type): + return instance_type["NetworkInfo"]["MaximumNetworkInterfaces"] + + +def max_ips(instance_type): + return instance_type["NetworkInfo"]["Ipv4AddressesPerInterface"] + + +def memory_megabytes(instance_type): + return instance_type["MemoryInfo"]["SizeInMiB"] + + +# REQUIREMENTS ################################################################# + + +# reqs_per_project = { +# "airbyte": { +# "memory_megabytes": 0, +# }, +# "airflow": { +# "memory_megabytes": 0, +# }, +# "workbench": { +# "memory_megabytes": 0, +# }, +# "pomerium": { +# "memory_megabytes": 0, +# }, +# "superset": { +# "memory_megabytes": 0, +# }, +# "prod_dbt_docs": { +# "memory_megabytes": 0, +# "elb_volumes": 1, +# }, +# } + + +reqs_per_user = { + "code_server": { # includes dbt_docs in the same pod + "memory_megabytes": 2048, + "elb_volumes": 1, + }, +} + + +def user_pods_per_node(instance_type): + req_memory = reqs_per_user["code_server"]["memory_megabytes"] + req_volumes = reqs_per_user["code_server"]["elb_volumes"] + memory = memory_megabytes(instance_type) + limit_from_memory = memory // req_memory + + # Not sure if all these attachments are always used, and if I'm interpreting + # the data and docs correctly. This should be the worst case analysis. 
+ instance_storage_attachments = sum( + disk["Count"] + for disk in instance_type.get("InstanceStorageInfo", {}).get("Disks", []) + ) + eni_attachments = max_eni(instance_type) + + limit_from_volumes = ( + min( + max_volume_attachments, + max_attachments - instance_storage_attachments - eni_attachments, + ) + // req_volumes + ) + + pods = min(limit_from_memory, limit_from_volumes) + + return pods, limit_from_volumes, limit_from_memory, memory + + +def eks_instance_type_candidates(): + candidates = [] + for t in instance_types: + it = t["InstanceType"] + + # filter by prefix for "general purpose" types + # Alleged prefix naming convention: https://stackoverflow.com/a/60512622 + if it[0].lower() not in ("m", "t"): + continue + if it[2] != ".": + continue + n, lv, lm, mem = user_pods_per_node(t) + memory_waste_in_pods = 1 + if n > 0 and (lv <= lm and lm <= lv + memory_waste_in_pods or lm <= lv): + candidates.append((n, it, lv, lm, mem)) + candidates.sort() + return candidates diff --git a/scripts/github.py b/scripts/github.py new file mode 100644 index 00000000..e6ee56ca --- /dev/null +++ b/scripts/github.py @@ -0,0 +1,344 @@ +import base64 +import json +import os +import re +from datetime import datetime +from os import listdir +from pathlib import Path +from typing import Dict + +import github +import requests +from github import ( + ContentFile, + GithubException, + GithubObject, + InputGitAuthor, + Repository, +) + +from lib import cmd +from lib.config_files import load_yaml, secret_value_from_yaml, write_yaml + + +def get_token(): + secret_token_path = "secrets/cli.secret.yaml" + github_token = os.environ.get("DATACOVES_GITHUB_API_TOKEN") + if not github_token: + github_token = secret_value_from_yaml( + Path(secret_token_path), "github_api_token" + ) + return github_token + + +class Releaser: + """ + This class gets and creates GitHub releases for a specific repository + """ + + def __init__(self, repo="datacoves/datacoves") -> Repository: + github_token = get_token() + client = github.Github(github_token) + self.repository = client.get_repo(repo) + + def get_latest_releases(self, include_drafts=False): + """ + Returns the first page including the latest releases. 
+ Draft releases that are not a pre-release are filtered out + """ + return [ + release + for release in self.repository.get_releases().get_page(0) + if not release.draft or release.title.startswith("pre") or include_drafts + ] + + def download_releases(self, include_drafts=False, cleanup=False): + releases = self.get_latest_releases(include_drafts=include_drafts) + github_token = get_token() + headers = { + "Authorization": "token " + github_token, + "Accept": "application/octet-stream", + } + + folder = Path("releases") + folder.mkdir(parents=True, exist_ok=True) + older = None + file_names = [] + + for release in releases: + file_name = ( + release.title[1:] if release.title[0] == "v" else release.title + ) + ".yaml" + file_names.append(file_name) + release_file = Path(f"releases/{file_name}") + + if release_file.exists() and not release.draft: + print(f"Skipping already existent release file {release.title}.") + release_yaml = load_yaml(release_file) + else: + print(f"Downloading release {release.title}") + asset = release.get_assets()[0] + session = requests.Session() + response = session.get(asset.url, stream=True, headers=headers) + with open(release_file, "wb") as f: + for chunk in response.iter_content(1024 * 1024): + f.write(chunk) + # Updating notes from GitHub release notes + release_yaml = load_yaml(release_file) + release_yaml["notes"] = release.body + write_yaml(release_file, release_yaml) + + released_at = datetime.fromisoformat(release_yaml["released_at"]) + if not older or older > released_at: + older = released_at + + if cleanup: + releases_dir = Path("releases") + releases = [ + f for f in listdir(releases_dir) if Path(releases_dir / f).is_file() + ] + for release_name in releases: + file = releases_dir / release_name + release_yaml = load_yaml(file) + # If release is older than the one downloaded from github and does not exist on github + if ( + datetime.fromisoformat(release_yaml["released_at"]) > older + and release_name not in file_names + ): + print(f"Deleting unexistent release on GitHub {file}") + file.unlink() + + def create_release( + self, + name, + commit, + is_prerelease=False, + notes="", + author_name="Datacoves", + author_email="support@datacoves.com", + ): + version = name if is_prerelease else f"v{name}" + + if is_prerelease: + # We delete the older pre-release if exists + releases = self.get_latest_releases() + for release in releases: + if release.title == version: + release.delete_release() + try: + release = self.repository.create_git_tag_and_release( + version, + version, + version, + notes, + commit, + "commit", + InputGitAuthor(author_name, author_email), + True, + is_prerelease, + False, + ) + release.upload_asset( + f"releases/{name}.yaml", + "manifest.yaml", + "application/yaml", + "manifest.yaml", + ) + except GithubException as ex: + if ex.status == 422: + print("Push your changes to GitHub before creating a new release.") + exit() + raise + return release + + +class ImageReferenceUpdater: + """ + This class updates references to datacoves public images by creating + PRs on Github that change the image referenced on a github workflow + """ + + def __init__(self, usage_repo): + usage_repo_owner = usage_repo.get("owner") + usage_repo_name = usage_repo.get("name") + github_token = get_token() + g_usage = github.Github(github_token) + + self.github_access_token = github_token + + self.usage_repo_owner = usage_repo_owner + self.usage_repo_name = usage_repo_name + self.usage_repository = g_usage.get_repo( + f"{usage_repo_owner}/{usage_repo_name}" 
+ ) + + def _get_contents(self, repository, path, ref=GithubObject.NotSet) -> ContentFile: + return repository.get_contents(path, ref) + + def _update_file( + self, + repository, + old_file: ContentFile, + commit_message: str, + modified_content: str, + branch: str = GithubObject.NotSet, + ) -> Dict: + return repository.update_file( + old_file.path, commit_message, modified_content, old_file.sha, branch + ) + + def _update_ci_workflow_content(self, file, image, image_tag, target_branch): + original_file_content = base64.b64decode(file.content).decode() + modified_file_content, subns = re.subn( + rf"{image}:[a-z\d\-\.]+", image_tag, original_file_content + ) + if subns == 0: + return {} + + return self.usage_repository.update_file( + file.path, + f"Update {file.name}", + modified_file_content, + file.sha, + target_branch, + ) + + def _close_existent_pull_requests_for_image( + self, default_branch_name, image, target_branch_name + ): + pull_requests = self.usage_repository.get_pulls( + state="open", sort="created", base=default_branch_name + ) + for pr in pull_requests: + if pr.title == target_branch_name: + # Close the PR + req_head = { + "Accept": "application/vnd.github+json", + "Authorization": f"Bearer {self.github_access_token}", + } + req_url = ( + f"https://api.github.com/repos/{self.usage_repo_owner}" + f"/{self.usage_repo_name}/pulls/{pr.number}" + ) + req_body = {"state": "closed"} + req_response = requests.patch( + req_url, data=json.dumps(req_body), headers=req_head + ) + if req_response.status_code == 200: + print( + f"Existent pull request for {image} found. PR '{pr.title}' closed" + ) + # Delete PR branch + branch_ref = self.usage_repository.get_git_ref(f"heads/{pr.title}") + print(f"Branch {branch_ref.ref} deleted") + branch_ref.delete() + + def _create_pull_request_on_usage_repo( + self, target_branch_name, default_branch_name, image + ): + self.usage_repository.create_pull( + title=target_branch_name, + body=target_branch_name, + head=target_branch_name, + base=default_branch_name, + ) + print( + f"Pull Request '{target_branch_name}' created in repository {self.usage_repository.name}" + ) + + def update_usage(self, default_branch_name, target_branch_name, image, image_tag): + self._close_existent_pull_requests_for_image( + default_branch_name, image, target_branch_name + ) + + usage_repo_workflows = self._get_contents( + self.usage_repository, ".github/workflows" + ) + update_results = [] + source_branch = self.usage_repository.get_branch(default_branch_name) + self.usage_repository.create_git_ref( + ref="refs/heads/" + target_branch_name, sha=source_branch.commit.sha + ) + + for workflow in usage_repo_workflows: + update_result = self._update_ci_workflow_content( + workflow, image, image_tag, target_branch_name + ) + if update_result and "commit" in update_result: + update_results.append(update_result["commit"]) + + if update_results: + # Create PR on usage repository + self._create_pull_request_on_usage_repo( + target_branch_name, default_branch_name, image + ) + + +def get_prs_between(start: str, end: str) -> list: + """Returns a list of pull request ID's (in integer format) that exist + between tags 'start' and 'end'. + + This uses the current user's GIT checkout to get the data, and will + do a git fetch -a as part of this. It is easier to do it this way + then to try and get it from the git API. + """ + + # Make sure start and end start with a 'v' for this. 
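+ # e.g. a bare "3.1.202401010101" (hypothetical release name) becomes
+ # "v3.1.202401010101", matching the tag naming used by Releaser.create_release.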
+ if start[0] != "v": + start = f"v{start}" + + if end[0] != "v": + end = f"v{end}" + + # Run git fetch -a first, to get all tags. + cmd.output("git fetch -a") + + command = [ + "git", + "log", + f"{start}..{end}", + "--reverse", + "--merges", + "--oneline", + "--grep=Merge pull request #", + ] + + prs = [] + + for line in cmd.output(command).split("\n"): + match = re.search("pull request #(\\d+) from", line) + + if match: + prs.append(int(match.group(1))) + + return prs + + +def get_prs_with_label(start: str, end: str, name: str) -> list: + """Get a list of PR's between 'start' and 'end' which have a certain + label name applied. + + This is only designed for collecting documentation for deployment, + and as such is hard-coded to use the datacoves/datacoves repo. There + is no real reason to support any other repos at this time. + + This will be a list of Issue objects from the Github library. + """ + + prs = get_prs_between(start, end) + + github_token = get_token() + client = github.Github(github_token) + repo = client.get_repo("datacoves/datacoves") + + issues = [] + + for pr in prs: + issue = repo.get_issue(pr) + + for label in issue.get_labels(): + if label.name == name: + issues.append(issue) + break + + return issues diff --git a/scripts/helm_utils.py b/scripts/helm_utils.py new file mode 100644 index 00000000..1d58e12c --- /dev/null +++ b/scripts/helm_utils.py @@ -0,0 +1,133 @@ +import json + +from scripts import k8s_utils + + +def gc_helm_release(ns, release_name, keep=3): + revisions = get_helm_release_revisions(ns, release_name) + for i in range(0, len(revisions) - keep): + secret_name = revisions[i][1] + k8s_utils.kubectl(f"-n {ns} delete secret {secret_name}") + + +def retry_helm_release(ns, release_name, action="install"): + if action == "reinstall": + k8s_utils.helm(f"-n {ns} rollback {release_name}") + if action in ["install", "reinstall"]: + release = json.loads( + k8s_utils.kubectl_output(f"-n {ns} get helmrelease {release_name} -o json") + ) + chart = release["spec"]["chart"] + repo_name = release["spec"]["repoName"] + repo_url = release["spec"]["repoURL"] + values_name = release["spec"]["valuesName"] + version = release["spec"]["version"] + + print("chart:", chart) + print("repo_name:", repo_name) + print("repo_url:", repo_url) + print("values_name:", values_name) + print("version:", version) + + values = json.loads( + k8s_utils.kubectl_output(f"-n {ns} get cm {values_name} -o json") + ) + values_file = f"{values_name}.yaml" + with open(values_file, "w+") as f: + f.write(values["data"]["values.yaml"]) + + k8s_utils.helm(f"repo add {repo_name} {repo_url} --force-update") + k8s_utils.helm("repo update") + k8s_utils.helm( + f"-n {ns} upgrade --install {release_name} {chart}", + "--version", + version, + "-f", + values_file, + "--atomic", + "--force", + ) + else: + k8s_utils.helm(f"-n {ns} uninstall {release_name}") + + +def nuke_helm_release(ns, release_name, dump_to_file=True): + revisions = get_helm_release_revisions(ns, release_name) + if not revisions: + print("release not found") + return + + secret_name = revisions[-1][1] + if dump_to_file: + release_yaml = k8s_utils.kubectl_output( + f"-n {ns} get secret {secret_name} -o yaml" + ) + with open(f"{release_name}.yaml", "w+") as f: + f.write(release_yaml) + print(f"Release yaml saved to {release_name}.yaml") + + k8s_utils.kubectl(f"-n {ns} delete secret {secret_name}") + + +def get_helm_release_revisions(ns, release_name): + """Returns a sorted list of tuples (revision_number, helm_state_secret_name).""" + 
secret_prefix = f"sh.helm.release.v1.{release_name}.v" + revisions = [] + # Using -o name is cleaner, but it seems to hang in cases when the default + # column listing doesn't. So we parse the lines to get the name. + lines = k8s_utils.kubectl_output(f"-n {ns} get secret").split("\n")[1:] + for line in lines: + parts = line.split() + if not parts: + continue + secret_name = parts[0] + if not secret_name.startswith(secret_prefix): + continue + revision = int(secret_name[len(secret_prefix) :]) + revisions.append((revision, secret_name)) + return sorted(revisions) + + +def get_charts_installed(ns=None): + """Return tuples of (namespace, chart release, action) for each chart""" + o = k8s_utils.helm_output("list -A" if ns is None else f"list -n {ns}") + lines = o.split("\n")[1:] + charts = [] + for line in lines: + if not line: + continue + cols = line.split() + name, ns, state = cols[0], cols[1], cols[7] + charts.append((ns, name, state)) + return charts + + +def get_failed_env_charts(include_pending=False): + """Return tuples of (namespace, chart release, action) for each failing chart""" + filter = "--failed --pending" if include_pending else "--failed" + namespaces = k8s_utils.kubectl_output("get ns") + lines = [] + for ns in namespaces.split("\n")[1:]: + ns = ns.split(" ")[0] + if ns[:4] == "dcw-": + o = k8s_utils.helm_output(f"list -n {ns} {filter}") + lines += o.split("\n")[1:] + charts = [] + for line in lines: + if not line: + continue + cols = line.split() + name, ns, state = cols[0], cols[1], cols[7] + env_slug = ns.replace("dcw-", "") + workspace = json.loads( + k8s_utils.kubectl_output( + f"-n {ns} get workspace {env_slug}" " -o jsonpath='{.spec.services}'", + encoding="utf-8", + ).replace("'", "") + ) + service = workspace.get(name.replace(f"{env_slug}-", "")) + if service: + install_action = "reinstall" if state == "pending-upgrade" else "install" + action = install_action if service["enabled"] == "true" else "uninstall" + charts.append((ns, name, action)) + return charts diff --git a/scripts/installer.py b/scripts/installer.py new file mode 100644 index 00000000..2fc7b99e --- /dev/null +++ b/scripts/installer.py @@ -0,0 +1,229 @@ +import questionary +from questionary.prompts.common import Choice +from rich import print +from rich.panel import Panel + +from lib import argument_parsing_utils as arg_parse +from lib import cmd +from scripts import ( + console, + docker_images, + observability, + setup_admission_controller, + setup_core, + setup_operator, +) +from scripts.helm_utils import get_failed_env_charts, retry_helm_release + + +def install_datacoves(cluster_domain: str, automate: bool = False): + not_local = cluster_domain != "datacoveslocal.com" + if not_local and not automate: + if ( + not questionary.confirm("Have you `git pull` config folder?").ask() + or not questionary.confirm( + "Have you configured and revealed secrets (`git secret reveal`)?" + ).ask() + or not questionary.confirm( + "Did you check that there aren't untracked files in the config repo (`git status`)?" 
+ ).ask() + ): + exit() + + if automate: + selected = [ + "images", + "operator", + "core", + "admission", + "observability", + ] + + else: + selected = questionary.checkbox( + "Uncheck the tasks you want to skip", + choices=[ + Choice( + "Deploy Images to Custom Registry", + value="images", + checked=not_local, + ), + Choice("Setup Datacoves Operator", value="operator", checked=True), + Choice("Setup Datacoves Core", value="core", checked=True), + Choice("Set Maintenance Mode", value="maintenance", checked=False), + Choice( + "Setup Admission Controller", value="admission", checked=not_local + ), + Choice( + "Setup Observability Stack", + value="observability", + checked=not_local, + ), + ], + ).ask() + + if not selected: + exit() + + if "images" in selected: + console.print_title("Deploying images to custom container registry") + docker_images.deploy_images(cluster_domain) + if "operator" in selected: + console.print_title("Setting up Datacoves Operator") + setup_operator.setup_operator(cluster_domain) + if "core" in selected: + console.print_title("Setting up Datacoves Core Services") + setup_core.setup_core(cluster_domain) + if "maintenance" in selected: + set_maintenance_mode(cluster_domain) + if "admission" in selected: + console.print_title("Setting up Admission Controller") + setup_admission_controller.setup_admission_controller(cluster_domain) + if "observability" in selected: + console.print_title("Setting up Observability Stack") + observability.setup_observability_main.setup(cluster_domain, automate) + + retry_helm_charts(prompt=(not automate)) + + +def set_maintenance_mode(cluster_domain: str): + """Sets cluster on maintenance mode""" + console.print_title("Set Maintenance Mode") + switch = questionary.select("Switch:", choices=["on", "off"], default="on").ask() + restore_time = "today at 8PM UTC" + contact_email = "support@datacoves.com" + contact_name = "Our Support Team" + + if switch == "on": + restore_time = questionary.text("Restore time:", default=restore_time).ask() + contact_email = questionary.text("Contact time:", default=contact_email).ask() + contact_name = questionary.text("Contact name:", default=contact_name).ask() + + cluster_domain = arg_parse.parse_cluster_domain(cluster_domain) + setup_core.setup_maintenance_page( + cluster_domain=cluster_domain, + on_maintenance=switch in ["on", "ON", "On"], + restore_time=restore_time, + contact_email=contact_email, + contact_name=contact_name, + ) + + +def retry_helm_charts(include_pending=False, prompt=True): + state = "failed/pending" if include_pending else "failed" + console.print_title("Validating workspace helm charts") + charts = get_failed_env_charts(include_pending=include_pending) + if charts: + choices = [ + Choice(f"{chart[2]} {chart[1]}", value=chart, checked=False) + for chart in charts + ] + if prompt: + selected = questionary.checkbox( + f"Workspace helm charts on {state} state were found, choose corrective actions", + choices=choices, + ).ask() + else: + selected = choices + if selected: + for chart in selected: + retry_helm_release(chart[0], chart[1], chart[2]) + else: + print(f"No {state} workspace helm charts found.") + + +INSTALLER_FILES = [ + "cli.py", + "requirements.txt", + ".gitignore", + ".version.yml", + "lib/", + "releases/", + "scripts/", + "src/core/operator/config", + "src/core/admission-controller/charts", +] + + +def bundle_installer(*cluster_domains): + tarfile = "installer.tar" + files = INSTALLER_FILES.copy() + + for cluster_domain in cluster_domains: + config_path = 
f"config/{cluster_domain}" + files.append(config_path) + + if questionary.confirm(f"Confirm secrets reveal on {config_path}?").ask(): + cmd.run("git secret reveal -f", cwd=config_path) + + cmd.run(f"tar -cf {tarfile}", *files) + + for cluster_domain in cluster_domains: + print( + Panel( + f""" + Scripts, config and secrets have been bundled on [u]{tarfile}[/u]. + Next steps: + 1. Create k8s cluster and checkout README.md + 2. pip3 install -r requirements.txt + 3. ./cli.py setup_base {cluster_domain} + 4. ./cli.py install {cluster_domain} + """, + title="Datacoves Installer", + ) + ) + + +def rsync_to_client_mirror(destination, client): + files = ( + INSTALLER_FILES + + [f"docs/client-docs/{client}"] + + [ + "docs/how-tos/grafana-loki-storage-config-providers.md", + "docs/how-tos/debug-airflow-workers.md", + "docs/how-tos/img/debug-airflow-workers-1-min.png", + "docs/how-tos/img/debug-airflow-workers-2-min.png", + "docs/how-tos/img/loki-aws-1-min.png", + "docs/how-tos/img/loki-aws-2-min.png", + "docs/how-tos/img/loki-aws-3-min.png", + ] + + [f"docs/how-tos/img/loki-azure-{i + 1}-min.png" for i in range(7)] + ) + return rsync_to_mirror(destination, files) + + +def rsync_to_mirror(destination, sources, include_secrets=False): + filter_rules = [ + # Exclude git secret's files. Mirrors have their own. + "- *.secret", + "- .gitsecret", + # Misc. + "- __pycache__", + "- node_modules", + ] + + filter_rules_to_exclude_secrets = [ + "- *.cer", + "- *.key", + "- id_rsa", + "- .env", + "- .env.*", + "- *.env", + "- *.secret-env", + "- *.secret.yaml", + "- *.secret.json", + ] + + filter_args = [] + for rule in filter_rules: + filter_args += ("-f", rule) + if not include_secrets: + for rule in filter_rules_to_exclude_secrets: + filter_args += ("-f", rule) + + for i, src in enumerate(sources): + if src.endswith("/"): + src = src[:-1] + sources[i] = f"././{src}" + + cmd.run("rsync -aRvL --delete", *filter_args, *sources, destination) diff --git a/scripts/k8s_utils.py b/scripts/k8s_utils.py new file mode 100644 index 00000000..645d9c7b --- /dev/null +++ b/scripts/k8s_utils.py @@ -0,0 +1,203 @@ +import re +import subprocess + +from lib import cmd + +kube_context = None + + +def set_context(ctx): + global kube_context + assert type(ctx) is str + kube_context = ctx + + +def get_context(): + assert kube_context is not None + return kube_context + + +def make_kcmd(cmd_func, exec, ctx_flag): + def kcmd(command, *args, **kwargs): + assert kube_context, "k8s_utils.set_context not called" + if isinstance(command, str): + return cmd_func( + f"{exec} {ctx_flag} {kube_context} {command}", *args, **kwargs + ) + else: + _cmd = [exec, ctx_flag, kube_context] + command + return cmd_func(_cmd, *args, **kwargs) + + return kcmd + + +kubectl = make_kcmd(cmd.run, "kubectl", "--context") +helm = make_kcmd(cmd.run, "helm", "--kube-context") +kubectl_output = make_kcmd(cmd.output, "kubectl", "--context") +helm_output = make_kcmd(cmd.output, "helm", "--kube-context") + + +def exists_resource(ns, resource, name) -> bool: + try: + kubectl_output(f"get -n {ns} {resource} {name}") + return True + except subprocess.CalledProcessError: + return False + + +def exists_namespace(ns) -> bool: + try: + kubectl_output(f"get namespace {ns}") + return True + except subprocess.CalledProcessError: + return False + + +def create_namespace(ns): + if not exists_namespace(ns=ns): + kubectl(f"create namespace {ns}") + + +def wait_for_deployment(ns, deployment): + kubectl(f"-n {ns} rollout status deployment/{deployment}") + + +def 
wait_for_statefulset(ns, statefulset): + kubectl(f"-n {ns} rollout status statefulset/{statefulset}") + + +def pod_for_deployment(ns, deployment): + return pod_by_label(ns=ns, label=f"app={deployment}") + + +def pod_by_label(ns, label): + o = kubectl_output(f"-n {ns} get pods -l {label}") + lines = o.split("\n")[1:] + for l in lines: + cols = l.split() + if len(cols) < 2: + continue + status = cols[2] + name = cols[0] + if status != "Running": + continue + return name + return None + + +def get_deployments(ns): + o = kubectl_output(f"-n {ns} get deployments") + lines = o.split("\n")[1:] + deploys = [] + for l in lines: + if l: + cols = l.split() + deploys.append(cols[0]) # name + + return deploys + + +def get_hpas(ns): + o = kubectl_output(f"-n {ns} get hpa") + lines = o.split("\n")[1:] + hpas = [] + for l in lines: + if l: + cols = l.split() + hpas.append(cols[0]) # name + + return hpas + + +def namespace_release(ns): + "Returns current datacoves release of a namespace (either core or an environment)" + o = kubectl_output(f"describe ns {ns}") + release = re.search(r"k8s\.datacoves\.com\/release=([\w\.\-]+)", o) + if release: + return release.group(1) + return None + + +def get_env_namespaces(): + "Returns current datacoves release of a namespace (either core or an environment)" + o = kubectl_output("get ns --selector=k8s.datacoves.com/environment-type") + return re.findall(r"(dcw\-\w+)", o) + + +def cmd_runner_in_pod(ns, pod, container=None, capture_output=False): + def run(command, *args, **kwargs): + container_cmd = f"--container {container}" if container else "" + if isinstance(command, str): + _cmd = f"-n {ns} exec -it {pod} {container_cmd} -- {command}" + else: + _cmd = ["-n", ns, "exec", "-it", pod, "--"] + command + + if capture_output: + return kubectl_output(_cmd, *args, **kwargs) + return kubectl(_cmd, *args, **kwargs) + + return run + + +def service_port(port=80, target_port=80, protocol="TCP"): + return {"protocol": protocol, "port": port, "targetPort": target_port} + + +def gen_service(name, ports=None, port=80, target_port=80, protocol="TCP"): + if not ports: + ports = [service_port(port=port, target_port=target_port, protocol=protocol)] + return { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "name": f"{name}-svc", + }, + "spec": { + "selector": { + "app": name, + }, + "ports": ports, + }, + } + + +def k8s_env(env): + return [{"name": name, "value": value} for name, value in env.items()] + + +def k8s_env_from_config_map(config_map_name, vars): + if isinstance(vars, dict): + return [ + { + "name": k, + "valueFrom": {"configMapKeyRef": {"key": v, "name": config_map_name}}, + } + for k, v in vars.items() + ] + else: + return [ + { + "name": v, + "valueFrom": {"configMapKeyRef": {"key": v, "name": config_map_name}}, + } + for v in vars + ] + + +def k8s_env_from_secret(secret_name, vars): + if isinstance(vars, dict): + return [ + { + "name": k, + "valueFrom": {"secretKeyRef": {"key": v, "name": secret_name}}, + } + for k, v in vars.items() + ] + else: + return [ + { + "name": v, + "valueFrom": {"secretKeyRef": {"key": v, "name": secret_name}}, + } + for v in vars + ] diff --git a/scripts/notemerge.py b/scripts/notemerge.py new file mode 100644 index 00000000..fe91a0c7 --- /dev/null +++ b/scripts/notemerge.py @@ -0,0 +1,106 @@ +"""Compiles a "merged" release notes file further down into basic notes +for end users with grouped sections. + +Arguments: input file, output file + +Can be used as a library or as a command line. 
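+
+For example (file names are illustrative):
+
+ python scripts/notemerge.py merged_notes.md release_notes.md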
+""" + +import re +import sys + + +def merge_notes(inputfile: str, outputfile: str): + """Takes an input file and writes it to output file. The input file + looks for release notes in a format such as: + + **Header** + + - Some + - Notes + - Like + - This + * OR + * With + * Stars + + **Another Header** + + - more + - notes + + It merges headers and notes and provides a combined file. It is + smart enough that the capitalization of the headers doesn't matter. + + It removes lines that start with '# Release' and '**Full Changelog**' + and also cleans out a lot of blank lines. + + Any text found before the first header (asside from the filtered text + above) is included at the top of the file. Any text found between + sections will probably wind up injected in one of the sections in a + slightly undetermined behavior. + """ + + notes = None + + with open(inputfile, "rt") as input: + notes = input.read() + + # Matcher for sections: + section_re = re.compile(r"^[*][*]([\w\d\s]+)[*][*][\s]*$") # noqa + + # Section groups + # + # Blank section is for lines that will appear at the top of the file. + sections = {"": []} + + # What section are we in + current_section = "" + + for line in notes.split("\n"): + # Skip changelog lines and release lines + if line.lower().startswith("**full changelog**") or line.startswith( + "# Release" + ): + continue + + matches = section_re.match(line) + if matches: + current_section = matches.group(1).lower().title() + + if current_section not in sections: + sections[current_section] = [] + + elif line: + # Standardize on - vs * + if line[0] == "-": + line = f"*{line[1:]}" + + sections[current_section].append(line) + + # Write output + with open(outputfile, "wt") as output: + for line in sections[""]: + output.write(line) + output.write("\n") + + # Get rid of the blank section after writing it out, so that we + # don't write it out again. 
+ del sections[""] + + for section, lines in sections.items(): + output.write("\n") + output.write(f"**{section}**") + output.write("\n\n") + + for line in lines: + output.write(line) + output.write("\n") + + +if __name__ == "__main__": + if len(sys.argv) != 3: + print("Syntax: inputfile outputfile") + sys.exit(-1) + + merge_notes(sys.argv[1], sys.argv[2]) diff --git a/scripts/observability/__init__.py b/scripts/observability/__init__.py new file mode 100644 index 00000000..88f7256f --- /dev/null +++ b/scripts/observability/__init__.py @@ -0,0 +1 @@ +from .setup_observability_main import setup # noqa diff --git a/scripts/observability/data/grafana-database-values.yaml b/scripts/observability/data/grafana-database-values.yaml new file mode 100644 index 00000000..a7094f0c --- /dev/null +++ b/scripts/observability/data/grafana-database-values.yaml @@ -0,0 +1,6 @@ +service: + ports: + postgresql: 5432 +primary: + persistence: + size: 20Gi \ No newline at end of file diff --git a/scripts/observability/data/grafana-rules.yaml b/scripts/observability/data/grafana-rules.yaml new file mode 100644 index 00000000..b7160bda --- /dev/null +++ b/scripts/observability/data/grafana-rules.yaml @@ -0,0 +1,143 @@ +groups: +- name: Instances + rules: + - alert: ContainerCpuUsage + expr: (sum(rate(container_cpu_usage_seconds_total{namespace=~"core|operator-system"}[3m])) + BY (container, namespace, instance, pod, name) * 100) > 90 + for: 30m + labels: + severity: warning + annotations: + summary: CPU usage > 90% on container {{ $labels.pod }}:{{ $labels.container }} on namespace {{ $labels.namespace }} + - alert: ContainerOOMKiller + expr: min_over_time(kube_pod_container_status_terminated_reason{reason="OOMKilled"}[10m]) == 1 + for: 0m + labels: + severity: critical + annotations: + summary: OOMKilled container {{ $labels.pod }}:{{ $labels.container }} on namespace {{ $labels.namespace }} + - alert: NodeOutOfDiskSpace + expr: "(node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and + ON (instance, device, mountpoint) node_filesystem_readonly == 0" + for: 30m + labels: + severity: warning + annotations: + summary: Disk free space < 10% on node {{ $labels.instance }} + - alert: NodeHighCpuLoad + expr: 100 - (avg by(instance, node) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 90 + for: 30m + labels: + severity: warning + annotations: + summary: CPU usage > 90% on node {{ $labels.instance }} + - alert: StatefulsetDown + expr: kube_statefulset_replicas != kube_statefulset_status_replicas_ready > 0 + for: 1m + labels: + severity: critical + annotations: + summary: StatefulSet {{ $labels.statefulset }} not ready for > 1 minute on namespace {{ $labels.namespace }} + - alert: PodCrashLooping + expr: increase(kube_pod_container_status_restarts_total{container!~"git-sync|s3-sync"} [5m]) > 2 + for: 0m + labels: + severity: critical + annotations: + summary: Crashloop on container {{ $labels.pod }}:{{ $labels.container }} on namespace {{ $labels.namespace }} + - alert: ReplicaSetMismatch + expr: kube_replicaset_spec_replicas != kube_replicaset_status_ready_replicas + for: 20m + labels: + severity: warning + annotations: + summary: Replica Set {{ $labels.replicaset }} not ready for > 20 minutes on namespace {{ $labels.namespace }} + - alert: StatefulsetUpdateNotRolledOut + expr: max without (revision) (kube_statefulset_status_current_revision unless kube_statefulset_status_update_revision) + * (kube_statefulset_replicas != kube_statefulset_status_replicas_updated) + for: 10m + labels: + 
severity: warning + annotations: + summary: StatefulSet {{ $labels.statefulset }} update not rolled out on namespace {{ $labels.namespace }} + - alert: PersistentVolumeClaimError + expr: kube_persistentvolumeclaim_status_phase{phase=~"Failed"} > 0 + for: 1m + labels: + severity: critical + annotations: + summary: PVC {{ $labels.persistentvolumeclaim }} not healthy on namespace {{ $labels.namespace }} + - alert: PodNotHealthy + expr: sum by (namespace, pod, node) (kube_pod_status_phase{phase=~"Pending|Unknown|Failed"} + and on(pod) kube_pod_labels{label_airflow_worker="", label_airbyte!~"job-pod|worker-pod"} + and on(pod) kube_pod_container_status_terminated_reason{reason!="OOMKilled"}) > 0 + for: 10m + labels: + severity: critical + annotations: + summary: Pod {{ $labels.pod }} not healthy on namespace {{ $labels.namespace }} + - alert: AirflowWorkerFailedToInit + expr: sum by (namespace) (kube_pod_init_container_status_terminated_reason{reason="Error", container=~"git-sync-init|s3-sync"} + and on(pod) kube_pod_labels{label_airflow_worker!=""}) > 0 + for: 1m + labels: + severity: critical + annotations: + summary: Airflow workers failed to copy dags from git/s3 on namespace {{ $labels.namespace }} + - alert: DiskPressure + expr: sum by (node) (kube_node_status_condition{condition="DiskPressure", status="true"}) > 0 + for: 1m + labels: + severity: critical + annotations: + summary: Node {{ $labels.node }} had DiskPressure condition + - alert: CeleryWorkerOffline + expr: sum(flower_worker_online{namespace="core"}) by (container) == 0 + for: 2m + labels: + severity: critical + context: celery-worker + annotations: + summary: Celery worker offline + description: Celery worker {{ $labels.worker }} has been offline for more than 2 minutes. + - alert: CeleryTaskFailureRatioTooHigh + expr: (sum(avg_over_time(flower_events_total{type="task-failed"}[15m])) by (task) / sum(avg_over_time(flower_events_total{type=~"task-failed|task-succeeded"}[15m])) by (task)) * 100 > 1 + for: 5m + labels: + severity: critical + context: celery-task + annotations: + summary: Task Failure Ratio Too High. + description: Average task failure ratio for task {{ $labels.task }} is {{ $value }}. + - alert: CeleryTaskPrefetchTimeTooHigh + expr: sum(avg_over_time(flower_task_prefetch_time_seconds[15m])) by (task, worker) > 1 + for: 5m + labels: + severity: critical + context: celery-task + annotations: + summary: Average Task Prefetch Time Too High. + description: Average task prefetch time at worker for task {{ $labels.task }} and worker {{ $labels.worker }} is {{ $value }}. + - alert: PodsStuckedInTerminatingStatus + expr: count(kube_pod_deletion_timestamp) by (namespace, pod) * count(kube_pod_status_reason{reason="NodeLost"} == 0) by (namespace, pod) > 0 + for: 5m + labels: + severity: warning + annotations: + summary: Pod {{$labels.namespace}}/{{$labels.pod}} stuck in terminating status.
+ - alert: HelmChartBadStatus + expr: sum(datacoves_helm_chart_info{status!="deployed"}) by (namespace, name, status) > 0 + for: 5m + labels: + severity: warning + annotations: + summary: Helm chart {{$labels.namespace}}/{{$labels.name}} on {{$labels.status}} status. \ No newline at end of file diff --git a/scripts/observability/data/loki-rules.yaml b/scripts/observability/data/loki-rules.yaml new file mode 100644 index 00000000..1ba72978 --- /dev/null +++ b/scripts/observability/data/loki-rules.yaml @@ -0,0 +1,30 @@ +groups: +- name: LogBasedAlerts + rules: + - alert: FailedToPullImagesFromRegistry + expr: | + count_over_time({agent_hostname="eventhandler", namespace=~"core|operator-system|dcw-.*"} |= "failed to pull and unpack image" [2m]) > 0 + for: "0m" + annotations: + summary: Image could not be pulled from registry + labels: + severity: critical + alert_type: log + - alert: MaxNodesGroupSizeReached + expr: | + count_over_time({agent_hostname="eventhandler"} |= "max node group size reached"[2m]) > 0 + for: "0m" + annotations: + summary: Node group has reached maximum node capacity + labels: + severity: critical + alert_type: log + - alert: ZombieAirflowTasksDetected + expr: | + count_over_time({namespace=~"dcw-.*", component="scheduler", container="scheduler", instance=~".*-airflow"} |= "Detected zombie job"[2m]) > 0 + for: "0m" + annotations: + summary: Zombie job detected on Airflow + labels: + severity: critical + alert_type: log \ No newline at end of file diff --git a/scripts/observability/grafana/dashboards/datacoves_nodes.json b/scripts/observability/grafana/dashboards/datacoves_nodes.json new file mode 100644 index 00000000..56ba4893 --- /dev/null +++ b/scripts/observability/grafana/dashboards/datacoves_nodes.json @@ -0,0 +1,744 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 17, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(kube_node_info) by (instance, internal_ip, kubelet_version, os_image)", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Nodes", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true + }, + "includeByName": {}, + "indexByName": {}, + "renameByName": {} + } + } + ], + "type": 
"table" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 0 + }, + "id": 11, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "(\n node_memory_MemTotal_bytes{instance=\"$node\"}\n-\n node_memory_MemFree_bytes{instance=\"$node\"}\n-\n node_memory_Buffers_bytes{instance=\"$node\"}\n-\n node_memory_Cached_bytes{instance=\"$node\"}\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory used", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "node_memory_Buffers_bytes{instance=\"$node\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory buffers", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "node_memory_Cached_bytes{instance=\"$node\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory cached", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "node_memory_MemFree_bytes{instance=\"$node\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "memory free", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "node_memory_MemTotal_bytes{instance=\"$node\"}", + "hide": false, + "interval": "", + "legendFormat": "memory total", + "range": true, + "refId": "E" + } + ], + "title": "Memory Usage by Node [$node]", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 80 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 3, + "x": 18, + "y": 0 + }, + "id": 13, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + 
"calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "100 - (\n avg(node_memory_MemAvailable_bytes{instance=\"$node\"}) /\n avg(node_memory_MemTotal_bytes{instance=\"$node\"})\n* 100\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "Memory Usage by Node [$node]", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "rgba(50, 172, 45, 0.97)", + "value": null + }, + { + "color": "rgba(237, 129, 40, 0.89)", + "value": 80 + }, + { + "color": "rgba(245, 54, 54, 0.9)", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 3, + "x": 21, + "y": 0 + }, + "id": 14, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "(\n avg(node_memory_MemFree_bytes{instance=\"$node\"}) /\n avg(node_memory_MemTotal_bytes{instance=\"$node\"})\n* 100\n)\n", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "Memory Free by Node [$node]", + "type": "gauge" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 19, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(kube_node_labels)", + "hide": false, + "instant": false, + "legendFormat": "total", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum 
(kube_node_labels{label_k8s_datacoves_com_nodegroup_kind!=\"\", label_k8s_datacoves_com_workers=\"\"}) by(label_k8s_datacoves_com_nodegroup_kind)", + "instant": false, + "legendFormat": "{{label_k8s_datacoves_com_nodegroup_kind}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum (kube_node_labels{label_k8s_datacoves_com_nodegroup_kind=\"\", label_k8s_datacoves_com_workers=\"enabled\"}) by(pod)", + "hide": false, + "instant": false, + "legendFormat": "workers", + "range": true, + "refId": "D" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum (kube_node_labels{label_k8s_datacoves_com_nodegroup_kind!=\"\", label_k8s_datacoves_com_workers=\"enabled\"}) by(label_k8s_datacoves_com_nodegroup_kind)", + "hide": false, + "instant": false, + "legendFormat": "{{label_k8s_datacoves_com_nodegroup_kind}} / worker", + "range": true, + "refId": "E" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum (kube_node_labels{label_k8s_datacoves_com_nodegroup_kind=\"\", label_k8s_datacoves_com_workers=\"\"}) by(pod)", + "hide": false, + "instant": false, + "legendFormat": "others", + "range": true, + "refId": "C" + } + ], + "title": "Node Group", + "type": "timeseries" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 18, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(kube_node_labels{label_k8s_datacoves_com_workers=\"enabled\"})", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Worker Nodes", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 39, + "tags": [ + "Datacoves" + ], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": [ + "dcw-dev123" + ], + "value": [ + "dcw-dev123" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(kube_pod_container_info, namespace)", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": true, + "name": "namespace", + "options": [], + "query": { + "query": "label_values(kube_pod_container_info, 
namespace)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "current": { + "selected": false, + "text": "172.18.0.2:9100", + "value": "172.18.0.2:9100" + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(node_memory_Buffers_bytes, instance)", + "description": "", + "hide": 0, + "includeAll": false, + "label": "Node", + "multi": false, + "name": "node", + "options": [], + "query": { + "query": "label_values(node_memory_Buffers_bytes, instance)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Datacoves Nodes", + "uid": "datacoves_nodes", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/scripts/observability/grafana/dashboards/datacoves_overview.json b/scripts/observability/grafana/dashboards/datacoves_overview.json new file mode 100644 index 00000000..f2e899bb --- /dev/null +++ b/scripts/observability/grafana/dashboards/datacoves_overview.json @@ -0,0 +1,642 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 4, + "panels": [], + "title": "Accounts and Environments", + "type": "row" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "is_active" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "options": { + "False": { + "index": 1, + "text": "❌" + }, + "None": { + "index": 2, + "text": "-" + }, + "True": { + "index": 0, + "text": "✅" + } + }, + "type": "value" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Subscribed" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "options": { + "False": { + "index": 1, + "text": "❌" + }, + "None": { + "index": 2, + "text": "-" + }, + "True": { + "index": 0, + "text": "✅" + } + }, + "type": "value" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "On Trial" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "options": { + "False": { + "index": 1, + "text": "❌" + }, + "None": { + "index": 2, + "text": "-" + }, + "True": { + "index": 0, + "text": "✅" + } + }, + "type": "value" + } + ] + } + ] + } + ] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 1 + }, + "id": 1, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + 
"exemplar": false, + "expr": "sum(datacoves_account_info{slug=~\"$account_slug\"}) by (name, slug, is_active, is_subscribed, is_on_trial, remaining_trial_days, developer_licenses)", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Accounts", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true, + "__name__": true, + "container": true, + "endpoint": true, + "instance": true, + "job": true, + "namespace": true, + "pod": true, + "service": true + }, + "includeByName": {}, + "indexByName": { + "Time": 7, + "Value": 16, + "__name__": 8, + "container": 9, + "developer_licenses": 6, + "endpoint": 10, + "instance": 11, + "is_active": 2, + "is_on_trial": 4, + "is_subscribed": 3, + "job": 12, + "name": 0, + "namespace": 13, + "pod": 14, + "remaining_trial_days": 5, + "service": 15, + "slug": 1 + }, + "renameByName": { + "developer_licenses": "Developer Licenses", + "is_active": "Active", + "is_on_trial": "On Trial", + "is_subscribed": "Subscribed", + "name": "Name", + "remaining_trial_days": "Remaining Trial Days", + "slug": "Slug" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 12 + }, + "id": 2, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(datacoves_environment_info{account_slug=~\"$account_slug\"}) by (name, slug, release_name, update_strategy, account_name, account_slug, project_name, project_slug)", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Environments", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true, + "__name__": true, + "container": true, + "endpoint": true, + "instance": true, + "job": true, + "namespace": true, + "pod": true, + "service": true + }, + "includeByName": {}, + "indexByName": { + "Time": 8, + "Value": 17, + "__name__": 9, + "account_name": 4, + "account_slug": 5, + "container": 10, + "endpoint": 11, + "instance": 12, + "job": 13, + "name": 0, + "namespace": 14, + "pod": 15, + "project_name": 6, + "project_slug": 7, + "release_name": 2, + "service": 16, + "slug": 1, + "update_strategy": 3 + }, + "renameByName": { + "account_name": "Account Name", + "account_slug": "Account Slug", + "endpoint": "", + "name": "Name", + "project_name": "Project Name", + "project_slug": "Project Slug", + "release_name": "Release", + "slug": "Slug", + "update_strategy": "Update Strategy" + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 23 + }, + "id": 5, + "panels": [], + "title": "Helm Charts", + "type": "row" + }, + { + "datasource": { + "default": 
false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Status" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "options": { + "deployed": { + "color": "green", + "index": 0 + }, + "failed": { + "color": "red", + "index": 1 + } + }, + "type": "value" + }, + { + "options": { + "pattern": "pending", + "result": { + "color": "yellow", + "index": 2 + } + }, + "type": "regex" + } + ] + }, + { + "id": "custom.cellOptions", + "value": { + "mode": "gradient", + "type": "color-background" + } + } + ] + } + ] + }, + "gridPos": { + "h": 21, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 3, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": false, + "displayName": "Namespace" + } + ] + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(datacoves_helm_chart_info{ns=~\"$ns\"}) by (name, namespace, status, chart, revision, app_version, updated)", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "Helm Chart", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true, + "__name__": true, + "container": true, + "endpoint": true, + "exported_namespace": true, + "instance": true, + "job": true, + "namespace": true, + "pod": true, + "revision": false, + "service": true + }, + "includeByName": {}, + "indexByName": { + "Time": 8, + "Value": 16, + "__name__": 9, + "app_version": 5, + "chart": 3, + "container": 10, + "endpoint": 11, + "instance": 12, + "job": 13, + "name": 0, + "namespace": 7, + "ns": 1, + "pod": 14, + "revision": 4, + "service": 15, + "status": 2, + "updated": 6 + }, + "renameByName": { + "app_version": "App Version", + "chart": "Chart", + "exported_namespace": "", + "name": "Name", + "ns": "Namespace", + "revision": "Revision", + "status": "Status", + "updated": "Updated" + } + } + } + ], + "type": "table" + } + ], + "schemaVersion": 39, + "tags": [ + "Datacoves", + "Accounts", + "Environments", + "Helm Charts" + ], + "templating": { + "list": [ + { + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(datacoves_account_info,slug)", + "hide": 0, + "includeAll": true, + "label": "Account", + "multi": true, + "name": "account_slug", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(datacoves_account_info,slug)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(kube_pod_container_info,namespace)", + "hide": 0, + "includeAll": true, + "label": "Namespace", + "multi": true, + "name": 
"ns", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(kube_pod_container_info,namespace)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Datacoves Overview", + "uid": "datacoves_overview", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/scripts/observability/grafana/dashboards/datacoves_pods.json b/scripts/observability/grafana/dashboards/datacoves_pods.json new file mode 100644 index 00000000..20007694 --- /dev/null +++ b/scripts/observability/grafana/dashboards/datacoves_pods.json @@ -0,0 +1,549 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "dash": [ + 10, + 10 + ], + "fill": "dash" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(kube_pod_status_phase{namespace=\"$namespace\"}) by (phase)", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Pod Healthy by Namespace [$namespace]", + "type": "timeseries" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "dash": [ + 10, + 10 + ], + "fill": "dash" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { 
+ "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(kube_deployment_status_replicas_available{namespace=\"$namespace\"}) by (namespace)", + "instant": false, + "legendFormat": "running", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(kube_deployment_spec_replicas{namespace=\"$namespace\"}) by (namespace)", + "hide": false, + "instant": false, + "legendFormat": "expected", + "range": true, + "refId": "B" + } + ], + "title": "Replicas Mismatch by Namespace [$namespace]", + "type": "timeseries" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(container_memory_working_set_bytes{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}) by (container)", + "instant": false, + "legendFormat": "{{pod}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(kube_pod_container_resource_limits{namespace=\"$namespace\", pod=\"$pod\", resource=\"memory\"}) by (container)", + "hide": false, + "instant": false, + "legendFormat": "{{container}} - limit", + "range": true, + "refId": "B" + } + ], + "title": "Memory Usage by Pod [$namespace/$pod]", + "type": "timeseries" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + 
}, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"$namespace\", pod=\"$pod\", container!=\"\"}[5m])) by (container)", + "instant": false, + "legendFormat": "{{container}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(kube_pod_container_resource_limits{namespace=\"$namespace\", pod=\"$pod\", resource=\"cpu\"}) by (container)", + "hide": false, + "instant": false, + "legendFormat": "{{container}} limit", + "range": true, + "refId": "B" + } + ], + "title": "CPU Usage by Pod [$namespace/$pod]", + "type": "timeseries" + } + ], + "schemaVersion": 39, + "tags": [ + "Datacoves" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "dcw-dev123", + "value": "dcw-dev123" + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(kube_namespace_labels,namespace)", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(kube_namespace_labels,namespace)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "current": { + "selected": false, + "text": "code-server-hey-0", + "value": "code-server-hey-0" + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(kube_pod_info{namespace=\"$namespace\"},pod)", + "hide": 0, + "includeAll": false, + "label": "Pod", + "multi": false, + "name": "pod", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(kube_pod_info{namespace=\"$namespace\"},pod)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Datacoves Pods", + "uid": "dataccoves_pods", + "version": 1, + "weekStart": "" + } \ No newline at end of file diff --git a/scripts/observability/grafana/dashboards/django.json b/scripts/observability/grafana/dashboards/django.json new file mode 100644 index 00000000..3965a8be --- /dev/null +++ 
b/scripts/observability/grafana/dashboards/django.json @@ -0,0 +1,1624 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Django metrics dashboard using django-prometheus metrics exporter", + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": 17658, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": true, + "panels": [ + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 0 + }, + "id": 13, + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 40 + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(irate(django_http_requests_total_by_transport_total{app=~\"^$application$\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "range": true, + "refId": "A" + } + ], + "title": "Requests", + "type": "stat" + }, + { + "datasource": { + "name": "Prometheus", + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 3, + "x": 6, + "y": 0 + }, + "id": 15, + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(irate(django_http_responses_total_by_status_total{status=~\"2.+\",app=~\"^$application$\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "2XX Responses", + "type": "stat" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + 
"color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 3, + "x": 9, + "y": 0 + }, + "id": 16, + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(irate(django_http_responses_total_by_status_total{status=~\"4.+\",app=~\"^$application$\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "4XX Responses", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "min": 0, + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + }, + { + "color": "red", + "value": 10 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 12, + "y": 0 + }, + "id": 37, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(max_over_time(django_db_errors_total{app=~\"^$application$\"}[$__range]))", + "format": "time_series", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "DB Query Errors", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "min": 0, + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 1 + }, + { + "color": "red", + "value": 5 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 15, + "y": 0 + }, + "id": 38, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(max_over_time(django_db_new_connection_errors_total{app=~\"^$application$\"}[$__range]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "DB Connection Errors", + 
"type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 0, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "yellow", + "value": 0.4 + }, + { + "color": "green", + "value": 0.7 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 0 + }, + "id": 31, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(django_cache_get_hits_total{app=~\"^$application$\"}) by (backend) / sum(django_cache_get_total{app=~\"^$application$\"}) by (backend)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Cache Hit Ratio", + "type": "gauge" + }, + { + "datasource": { + "name": "Prometheus", + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 3, + "x": 6, + "y": 2 + }, + "id": 23, + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 + }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(irate(django_http_responses_total_by_status_total{status=~\"3.+\",app=~\"^$application$\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "3XX Responses", + "type": "stat" + }, + { + "datasource": { + "name": "Prometheus", + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 2, + "mappings": [ + { + "options": { + "match": "null", + "result": { + "text": "N/A" + } + }, + "type": "special" + } + ], + "noValue": "0", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + "overrides": [] + }, + "gridPos": { + "h": 2, + "w": 3, + "x": 9, + "y": 2 + }, + "id": 17, + "maxDataPoints": 100, + "options": { + "colorMode": "none", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "text": { + "valueSize": 20 
+ }, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(irate(django_http_responses_total_by_status_total{status=~\"5.+\",app=~\"^$application$\"}[$__rate_interval]))", + "format": "time_series", + "intervalFactor": 1, + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "5XX Responses", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 4 + }, + "id": 20, + "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "reqps" + }, + "color": { + "exponent": 0.5, + "fill": "dark-orange", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Spectral", + "steps": 64 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto", + "value": "Responses" + }, + "tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(django_http_requests_latency_including_middlewares_seconds_bucket{app=~\"^$application$\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "interval": "", + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + "title": "Number of Requests by Processing Time", + "type": "heatmap" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 4 + }, + "id": 22, + "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "reqps" + }, + "color": { + "exponent": 0.5, + "fill": "dark-orange", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Spectral", + "steps": 64 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto", + "value": "Responses" + }, + "tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "bytes" + } + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(rate(django_http_responses_body_total_bytes_bucket{app=~\"^$application$\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "instant": false, + "interval": "", + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + "title": "Number of Responses by Size", + "type": "heatmap" + }, + { + "datasource": { + "name": "Prometheus", + "type": "prometheus", + "uid": "prometheus" + 
}, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "linearThreshold": 1, + "log": 2, + "type": "log" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "dashed" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 12 + }, + "id": 4, + "interval": "30s", + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "9.3.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.50, sum(irate(django_http_requests_latency_seconds_by_view_method_bucket{app=~\"^$application$\"}[$__rate_interval])) by (le))", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "50 quantile", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.95, sum(irate(django_http_requests_latency_seconds_by_view_method_bucket{app=~\"^$application$\"}[$__rate_interval])) by (le))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "95 quantile", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "histogram_quantile(0.99, sum(irate(django_http_requests_latency_seconds_by_view_method_bucket{app=~\"^$application$\"}[$__rate_interval])) by (le))", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 1, + "legendFormat": "99 quantile", + "range": true, + "refId": "C" + } + ], + "title": "Request Latency", + "type": "timeseries" + }, + { + "datasource": { + "name": "Prometheus", + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" 
+ }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 12 + }, + "id": 11, + "interval": "30s", + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "9.3.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(irate(django_http_responses_total_by_status_total{app=~\"^$application$\"}[$__rate_interval])) by(status)", + "format": "time_series", + "interval": "", + "intervalFactor": 1, + "legendFormat": "{{status}}", + "range": true, + "refId": "A" + } + ], + "title": "Response Status", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 0, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 0.3 + }, + { + "color": "red", + "value": 0.6 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 20 + }, + "id": 25, + "options": { + "displayMode": "gradient", + "maxVizHeight": 300, + "minVizHeight": 10, + "minVizWidth": 0, + "namePlacement": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "sum" + ], + "fields": "", + "limit": 20, + "values": true + }, + "showUnfilled": true, + "sizing": "auto", + "text": {}, + "valueMode": "color" + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "topk(20, (1 - (sum(max_over_time(django_http_requests_latency_seconds_by_view_method_bucket{app=~\"^$application$\",le=\"$threshold\"}[$__range]) / ignoring(le) max_over_time(django_http_requests_latency_seconds_by_view_method_count{app=~\"^$application$\"}[$__range])) by (method, view) / count(present_over_time(django_http_requests_latency_seconds_by_view_method_count{app=~\"^$application$\"}[$__range])) by (method, view))) > 0.0099)", + "format": "table", + "instant": true, + "interval": "", + "legendFormat": "{{method}} {{view}}", + "range": false, + "refId": "A" + } + ], + "title": "Top 20 Views by Response Time (> $threshold)", + "type": "bargauge" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "reqps" + }, + 
"overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 20 + }, + "id": 2, + "interval": "30s", + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true, + "sortBy": "Mean", + "sortDesc": true + }, + "tooltip": { + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "9.3.1", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "topk(10, sum(irate(django_http_requests_total_by_view_transport_method_total{app=~\"^$application$\",view!~\"prometheus-django-metrics|healthcheck|.*shop_product_import_1c_status\"}[$__rate_interval])) by(method, view) > 0)", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Top Requests", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ops" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 28 + }, + "id": 35, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(irate(django_db_execute_total{app=~\"^$application$\"}[$__rate_interval])) by (vendor)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Database Total Queries", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "scaleDistribution": { + "type": "linear" + } + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 28 + }, + "id": 33, + "options": { + "calculate": false, + "cellGap": 1, + "cellValues": { + "unit": "ops" + }, + "color": { + "exponent": 0.5, + "fill": "dark-orange", + "mode": "scheme", + "reverse": false, + "scale": "exponential", + "scheme": "Spectral", + "steps": 64 + }, + "exemplars": { + "color": "rgba(255,0,255,0.7)" + }, + "filterValues": { + "le": 1e-9 + }, + "legend": { + "show": true + }, + "rowsFrame": { + "layout": "auto" + }, + "tooltip": { + "mode": "single", + "showColorScale": false, + "yHistogram": true + }, + "yAxis": { + "axisPlacement": "left", + "decimals": 0, + "reverse": false, + "unit": "s" + } + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + 
"editorMode": "code", + "expr": "sum(rate(django_db_query_duration_seconds_bucket{app=~\"^$application$\"}[$__rate_interval])) by (le)", + "format": "heatmap", + "legendFormat": "{{le}}", + "range": true, + "refId": "A" + } + ], + "title": "Database Query Duration", + "type": "heatmap" + } + ], + "refresh": "30s", + "schemaVersion": 39, + "tags": [ + "Datacoves", + "Django" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(python_info,app)", + "hide": 0, + "includeAll": true, + "label": "application", + "multi": false, + "name": "application", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(python_info,app)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "current": { + "selected": false, + "text": "1s", + "value": "1.0" + }, + "description": "Request duration threshold", + "hide": 0, + "includeAll": false, + "label": "threshold", + "multi": false, + "name": "threshold", + "options": [ + { + "selected": false, + "text": "100ms", + "value": "0.1" + }, + { + "selected": false, + "text": "250ms", + "value": "0.25" + }, + { + "selected": false, + "text": "500ms", + "value": "0.5" + }, + { + "selected": false, + "text": "750ms", + "value": "0.75" + }, + { + "selected": true, + "text": "1s", + "value": "1.0" + }, + { + "selected": false, + "text": "2.5s", + "value": "2.5" + }, + { + "selected": false, + "text": "5s", + "value": "5.0" + }, + { + "selected": false, + "text": "7.5s", + "value": "7.5" + }, + { + "selected": false, + "text": "10s", + "value": "10.0" + }, + { + "selected": false, + "text": "25s", + "value": "25.0" + } + ], + "query": "100ms : 0.1, 250ms : 0.25, 500ms : 0.5, 750ms : 0.75, 1s : 1.0, 2.5s : 2.5, 5s : 5.0, 7.5s : 7.5, 10s : 10.0, 25s : 25.0", + "queryValue": "", + "skipUrlSync": false, + "type": "custom" + } + ] + }, + "time": { + "from": "now-5m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + }, + "timezone": "", + "title": "Django", + "uid": "django", + "version": 1, + "weekStart": "" + } \ No newline at end of file diff --git a/scripts/observability/grafana/dashboards/flower.json b/scripts/observability/grafana/dashboards/flower.json new file mode 100644 index 00000000..f00ebc11 --- /dev/null +++ b/scripts/observability/grafana/dashboards/flower.json @@ -0,0 +1,672 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "This panel shows status of celery workers. 
1 = online, 0 = offline.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 0 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "10.4.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "flower_worker_online", + "instant": false, + "legendFormat": "{{ worker }}", + "range": true, + "refId": "A" + } + ], + "title": "Celery Worker Status", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "This panel shows number of tasks currently executing at worker.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "lastNotNull" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "flower_worker_number_of_currently_executing_tasks", + "instant": false, + "legendFormat": "{{worker}}", + "range": true, + "refId": "A" + } + ], + "title": "Number of Tasks Currently Executing at Worker", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "This panel shows average task runtime at worker by worker and task name.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + 
"axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 8 + }, + "id": 3, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "lastNotNull" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "rate(flower_task_runtime_seconds_sum[5m]) / rate(flower_task_runtime_seconds_count[5m])", + "instant": false, + "legendFormat": "{{task}}, {{worker}}", + "range": true, + "refId": "A" + } + ], + "title": "Average Task Runtime at Worker", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "This panel shows task prefetch time at worker by worker and task name.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "flower_task_prefetch_time_seconds", + "instant": false, + "legendFormat": "{{task}}, {{worker}}", + "range": true, + "refId": "A" + } + ], + "title": "Task Prefetch Time at Worker", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "This panel shows number of tasks prefetched at worker by task and worker name.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + 
"hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 26 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "flower_worker_prefetched_tasks", + "instant": false, + "legendFormat": "{{task}}, {{worker}}", + "range": true, + "refId": "A" + } + ], + "title": "Number of Tasks Prefetched At Worker", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "This panel presents average task success ratio over time by task name.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 35 + }, + "id": 6, + "options": { + "legend": { + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "(sum(avg_over_time(flower_events_total{type=\"task-succeeded\"}[15m])) by (task) / sum(avg_over_time(flower_events_total{type=~\"task-failed|task-succeeded\"}[15m])) by (task)) * 100", + "instant": false, + "legendFormat": "{{task}}", + "range": true, + "refId": "A" + } + ], + "title": "Task Success Ratio", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "description": "This panel presents average task failure ratio over time by task name.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 35 + }, + "id": 7, + "options": { + "legend": { + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "(sum(avg_over_time(flower_events_total{type=\"task-failed\"}[15m])) by (task) / sum(avg_over_time(flower_events_total{type=~\"task-failed|task-succeeded\"}[15m])) by (task)) * 100", + "instant": false, + "legendFormat": "{{task}}", + "range": true, + "refId": "A" + } + ], + "title": "Task Failure Ratio", + 
"type": "piechart" + } + ], + "schemaVersion": 39, + "tags": [ + "Datacoves", + "Celery", + "Flower" + ], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Celery Monitoring", + "uid": "bdpz4yyt64v0gf", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/scripts/observability/grafana/dashboards/profile_image_set_build.json b/scripts/observability/grafana/dashboards/profile_image_set_build.json new file mode 100644 index 00000000..3c1db231 --- /dev/null +++ b/scripts/observability/grafana/dashboards/profile_image_set_build.json @@ -0,0 +1,1095 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 10, + "panels": [], + "title": "Overview", + "type": "row" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "filterable": true, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Fase" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "options": { + "Failed": { + "color": "red", + "index": 2 + }, + "Pending": { + "color": "yellow", + "index": 1 + }, + "Running": { + "color": "green", + "index": 0 + }, + "Succeeded": { + "color": "blue", + "index": 3 + }, + "Unknown": { + "color": "gray", + "index": 4 + } + }, + "type": "value" + } + ] + }, + { + "id": "custom.width", + "value": 100 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Execution Time (last)" + }, + "properties": [ + { + "id": "unit", + "value": "dtdurations" + }, + { + "id": "custom.width", + "value": 297 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Pod" + }, + "properties": [ + { + "id": "custom.width", + "value": 300 + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 7, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": false, + "displayName": "Pod" + } + ] + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": "Prometheus", + "editorMode": "code", + "exemplar": false, + "expr": "kube_pod_status_phase{namespace=\"core\", pod=~\"kaniko.*\"} == 1", + "format": "table", + "instant": false, + "range": true, + "refId": "A" + }, + { + "datasource": "Prometheus", + "editorMode": "code", + "exemplar": false, + "expr": "time() - kube_pod_created{namespace=\"core\", pod=~\"kaniko.*\"}", + "format": "table", + "hide": false, + "instant": false, + "range": true, + "refId": "B" + } + ], + "title": "Pod Status", + "transformations": [ + { + "id": "labelsToFields", + "options": { + "valueLabel": "phase" + } + }, + { + "id": "merge", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": 
true, + "Value #A": true, + "Value #B": false, + "__name__": true, + "container": true, + "endpoint": true, + "instance": true, + "job": true, + "namespace": true, + "service": true, + "uid": true + }, + "includeByName": {}, + "indexByName": { + "Time": 0, + "Value #A": 11, + "Value #B": 12, + "__name__": 1, + "container": 2, + "endpoint": 3, + "instance": 4, + "job": 5, + "namespace": 6, + "phase": 8, + "pod": 7, + "service": 9, + "uid": 10 + }, + "renameByName": { + "Value": "Estado", + "Value #B": "Execution Time", + "namespace": "Namespace", + "phase": "Fase", + "pod": "Pod" + } + } + }, + { + "id": "groupBy", + "options": { + "fields": { + "Execution Time": { + "aggregations": [ + "last" + ], + "operation": "aggregate" + }, + "Execution time (seconds)": { + "aggregations": [ + "last" + ], + "operation": "aggregate" + }, + "Execution time in seconds": { + "aggregations": [ + "last" + ], + "operation": "aggregate" + }, + "Fase": { + "aggregations": [ + "last" + ], + "operation": "aggregate" + }, + "Pod": { + "aggregations": [], + "operation": "groupby" + }, + "Tiempo de ejecución": { + "aggregations": [ + "lastNotNull" + ], + "operation": "aggregate" + }, + "Tiempo de ejecución (segundos)": { + "aggregations": [ + "last" + ], + "operation": "aggregate" + } + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Failed" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Pending" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Running" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Succeeded" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Unknown" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 12, + "y": 1 + }, + "id": 6, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": 
"code", + "expr": "sum(kube_pod_status_phase{namespace=\"core\", pod=~\"kaniko.*\"}) by (phase)", + "instant": false, + "legendFormat": "{{phase}}", + "range": true, + "refId": "A" + } + ], + "title": "Pod Status", + "type": "timeseries" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Failed" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Pending" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Running" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Succeeded" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Unknown" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "purple", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 18, + "y": 1 + }, + "id": 8, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "sum(kube_pod_status_phase{namespace=\"core\", pod=~\"kaniko.*\"}) by (phase)", + "format": "time_series", + "instant": false, + "legendFormat": "{{phase}}", + "range": true, + "refId": "A" + } + ], + "title": "Pod Status", + "type": "stat" + }, + { + "datasource": { + "default": false, + "type": "loki", + "uid": "datacoves_loki" + }, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 9 + }, + "id": 1, + "options": { + "dedupStrategy": "none", + "enableLogDetails": true, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Ascending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "datacoves_loki" + }, + "editorMode": "code", + "expr": "{namespace=\"core\", pod=~\"kaniko-.*\"}", + "queryType": "range", + "refId": "A" + } + ], + "title": "Logs of all Pods", + "type": "logs" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 23 + }, + "id": 11, + "panels": [], + "title": "Details by Pod [Kaniko-$build_id]", + "type": "row" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, 
+ "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Memory Limit" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 3, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(container_memory_working_set_bytes{namespace=\"core\", pod=\"kaniko-$build_id\", container=\"kaniko\"}) by (pod)", + "instant": false, + "legendFormat": "Memory Usage", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(kube_pod_container_resource_limits{namespace=\"core\", pod=\"kaniko-$build_id\", container=\"kaniko\", resource=\"memory\"}) by (pod)", + "hide": false, + "instant": false, + "legendFormat": "Memory Limit", + "range": true, + "refId": "B" + } + ], + "title": "Memory Usage", + "type": "timeseries" + }, + { + "datasource": { + "default": false, + "type": "prometheus", + "uid": "prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "CPU Limit" + }, + "properties": [ + { + "id": "custom.drawStyle", + "value": "line" + }, + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"core\", pod=\"kaniko-$build_id\", container=\"kaniko\"}[5m])) by (pod)", + "instant": false, + "legendFormat": "CPU 
Usage", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "editorMode": "code", + "expr": "sum(kube_pod_container_resource_limits{namespace=\"core\", pod=\"kaniko-$build_id\", container=\"kaniko\", resource=\"cpu\"}) by (pod)", + "hide": false, + "instant": false, + "legendFormat": "CPU Limit", + "range": true, + "refId": "B" + } + ], + "title": "CPU Usage", + "type": "timeseries" + }, + { + "datasource": { + "default": false, + "type": "loki", + "uid": "datacoves_loki" + }, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 31 + }, + "id": 9, + "options": { + "dedupStrategy": "none", + "enableLogDetails": false, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": true, + "sortOrder": "Ascending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "datacoves_loki" + }, + "editorMode": "code", + "expr": "{namespace=\"core\", pod=\"kaniko-$build_id\", container=\"kaniko\"}", + "queryType": "range", + "refId": "A" + } + ], + "title": "Kaniko Logs", + "type": "logs" + }, + { + "datasource": { + "default": false, + "type": "loki", + "uid": "datacoves_loki" + }, + "gridPos": { + "h": 11, + "w": 24, + "x": 0, + "y": 45 + }, + "id": 2, + "options": { + "dedupStrategy": "none", + "enableLogDetails": false, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": false, + "sortOrder": "Ascending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "datacoves_loki" + }, + "editorMode": "code", + "expr": "{namespace=\"core\", pod=\"kaniko-$build_id\", container=\"sidecar-webhook\"}", + "queryType": "range", + "refId": "A" + } + ], + "title": "Webhook Logs", + "type": "logs" + } + ], + "refresh": "5s", + "schemaVersion": 39, + "tags": [ + "Datacoves", + "Profile Image Set" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "Default", + "value": "Default" + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(kube_pod_labels{label_app=\"kaniko\"},label_k8s_datacoves_com_kaniko_profile_name)", + "hide": 0, + "includeAll": false, + "label": "Profile", + "multi": false, + "name": "profile_name", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(kube_pod_labels{label_app=\"kaniko\"},label_k8s_datacoves_com_kaniko_profile_name)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "current": { + "selected": false, + "text": "89d5c26117", + "value": "89d5c26117" + }, + "datasource": { + "type": "prometheus", + "uid": "prometheus" + }, + "definition": "label_values(kube_pod_labels{label_k8s_datacoves_com_kaniko_profile_name=\"$profile_name\"},label_k8s_datacoves_com_kaniko_build_id)", + "hide": 0, + "includeAll": false, + "label": "Build ID", + "multi": false, + "name": "build_id", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(kube_pod_labels{label_k8s_datacoves_com_kaniko_profile_name=\"$profile_name\"},label_k8s_datacoves_com_kaniko_build_id)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "current": { + "selected": false, + "text": "pi1-airflow-airflow-base", + "value": "pi1-airflow-airflow-base" + }, + "datasource": { + "type": 
"prometheus", + "uid": "prometheus" + }, + "definition": "label_values(kube_pod_labels{label_k8s_datacoves_com_kaniko_build_id=\"$build_id\"},label_k8s_datacoves_com_kaniko_image)", + "hide": 0, + "includeAll": false, + "label": "Image", + "multi": false, + "name": "image", + "options": [], + "query": { + "qryType": 1, + "query": "label_values(kube_pod_labels{label_k8s_datacoves_com_kaniko_build_id=\"$build_id\"},label_k8s_datacoves_com_kaniko_image)", + "refId": "PrometheusVariableQueryEditor-VariableQuery" + }, + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Profile Image Set", + "uid": "profile_image_set", + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/scripts/observability/grafana/dashboards/redis.json b/scripts/observability/grafana/dashboards/redis.json new file mode 100644 index 00000000..47426844 --- /dev/null +++ b/scripts/observability/grafana/dashboards/redis.json @@ -0,0 +1,1399 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "Prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.5.2" + }, + { + "type": "panel", + "id": "graph", + "name": "Graph", + "version": "" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "singlestat", + "name": "Singlestat", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "timezone": "browser", + "title": "Redis", + "uid": "datacoves-redis", + "id": null, + "version": 4, + "description": "Redis Dashboard for Prometheus Redis Exporter, it works with helm stable/redis-ha exporter.\r\n\r\nIf you missing redis memory utilization, please modify \"maxmemory\" value in values.yaml", + "editable": false, + "gnetId": 11835, + "graphTooltip": 0, + "iteration": 1583145402535, + "links": [], + "panels": [ + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "decimals": 0, + "editable": true, + "error": false, + "format": "s", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 2, + "x": 0, + "y": 0 + }, + "id": 9, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "max(max_over_time(redis_uptime_in_seconds{instance=~\"$instance\"}[$__interval]))", + "format": 
"time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 1800 + } + ], + "thresholds": "", + "title": "Uptime", + "type": "singlestat", + "valueFontSize": "70%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(245, 54, 54, 0.9)", + "rgba(237, 129, 40, 0.89)", + "rgba(50, 172, 45, 0.97)" + ], + "datasource": "${DS_PROMETHEUS}", + "decimals": 0, + "editable": true, + "error": false, + "format": "none", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": false, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 2, + "x": 2, + "y": 0 + }, + "hideTimeOverride": true, + "id": 12, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "redis_connected_clients{instance=~\"$instance\"}", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 2 + } + ], + "thresholds": "", + "timeFrom": "1m", + "timeShift": null, + "title": "Clients", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + }, + { + "cacheTimeout": null, + "colorBackground": false, + "colorValue": false, + "colors": [ + "rgba(50, 172, 45, 0.97)", + "rgba(237, 129, 40, 0.89)", + "rgba(245, 54, 54, 0.9)" + ], + "datasource": "${DS_PROMETHEUS}", + "decimals": 0, + "editable": true, + "error": false, + "format": "percent", + "gauge": { + "maxValue": 100, + "minValue": 0, + "show": true, + "thresholdLabels": false, + "thresholdMarkers": true + }, + "gridPos": { + "h": 7, + "w": 4, + "x": 4, + "y": 0 + }, + "hideTimeOverride": true, + "id": 11, + "interval": null, + "isNew": true, + "links": [], + "mappingType": 1, + "mappingTypes": [ + { + "name": "value to text", + "value": 1 + }, + { + "name": "range to text", + "value": 2 + } + ], + "maxDataPoints": 100, + "nullPointMode": "connected", + "nullText": null, + "options": {}, + "postfix": "", + "postfixFontSize": "50%", + "prefix": "", + "prefixFontSize": "50%", + "rangeMaps": [ + { + "from": "null", + "text": "N/A", + "to": "null" + } + ], + "sparkline": { + "fillColor": "rgba(31, 118, 189, 0.18)", + "full": false, + "lineColor": "rgb(31, 120, 193)", + "show": true + }, + "tableColumn": "", + "targets": [ + { + "expr": "100 * (redis_memory_used_bytes{instance=~\"$instance\"} / redis_memory_max_bytes{instance=~\"$instance\"} )", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "", + "metric": "", + "refId": "A", + "step": 2 + } + ], + "thresholds": "80,95", + "timeFrom": "1m", + "timeShift": null, + "title": "Memory Usage", + "type": "singlestat", + "valueFontSize": "80%", + "valueMaps": [ + { + "op": "=", + "text": "N/A", + "value": "null" + } + ], + "valueName": "current" + 
}, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 0 + }, + "hiddenSeries": false, + "id": 2, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(redis_commands_processed_total{instance=~\"$instance\"}[1m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "", + "metric": "A", + "refId": "A", + "step": 240, + "target": "" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Commands Executed / sec", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "decimals": 2, + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 0 + }, + "hiddenSeries": false, + "id": 1, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": false, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": true, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "irate(redis_keyspace_hits_total{instance=~\"$instance\"}[5m])", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "hits", + "metric": "", + "refId": "A", + "step": 240, + "target": "" + }, + { + "expr": "irate(redis_keyspace_misses_total{instance=~\"$instance\"}[5m])", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "misses", + "metric": "", + "refId": "B", + "step": 240, + "target": "" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Hits / Misses per Sec", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": "", + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, 
+ "alignLevel": null + } + }, + { + "aliasColors": { + "max": "#BF1B00" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 7 + }, + "hiddenSeries": false, + "id": 7, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "hideEmpty": false, + "hideZero": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "null as zero", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "redis_memory_used_bytes{instance=~\"$instance\"} ", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "used", + "metric": "", + "refId": "A", + "step": 240, + "target": "" + }, + { + "expr": "redis_memory_max_bytes{instance=~\"$instance\"} ", + "format": "time_series", + "hide": false, + "intervalFactor": 2, + "legendFormat": "max", + "refId": "B", + "step": 240 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Total Memory Usage", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": 0, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 7 + }, + "hiddenSeries": false, + "id": 10, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "rate(redis_net_input_bytes_total{instance=~\"$instance\"}[5m])", + "format": "time_series", + "intervalFactor": 2, + "legendFormat": "{{ input }}", + "refId": "A", + "step": 240 + }, + { + "expr": "rate(redis_net_output_bytes_total{instance=~\"$instance\"}[5m])", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{ output }}", + "refId": "B", + "step": 240 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Network I/O", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "bytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": 
"short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fill": 7, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 14 + }, + "hiddenSeries": false, + "id": 5, + "isNew": true, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "max": false, + "min": false, + "rightSide": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum (redis_db_keys{instance=~\"$instance\"}) by (db)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{ db }} ", + "refId": "A", + "step": 240, + "target": "" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Total Items per DB", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "none", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fill": 7, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 14 + }, + "hiddenSeries": false, + "id": 13, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "sum (redis_db_keys{instance=~\"$instance\"}) - sum (redis_db_keys_expiring{instance=~\"$instance\"}) ", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "not expiring", + "refId": "A", + "step": 240, + "target": "" + }, + { + "expr": "sum (redis_db_keys_expiring{instance=~\"$instance\"}) ", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "expiring", + "metric": "", + "refId": "B", + "step": 240 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Expiring vs Not-Expiring Keys", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, 
+ { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": { + "evicts": "#890F02", + "memcached_items_evicted_total{instance=\"172.17.0.1:9150\",job=\"prometheus\"}": "#890F02", + "reclaims": "#3F6833" + }, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fill": 1, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 21 + }, + "hiddenSeries": false, + "id": 8, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 2, + "links": [], + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "alias": "reclaims", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "sum(rate(redis_expired_keys_total{instance=~\"$instance\"}[5m])) by (instance)", + "format": "time_series", + "hide": false, + "interval": "", + "intervalFactor": 2, + "legendFormat": "expired", + "metric": "", + "refId": "A", + "step": 240, + "target": "" + }, + { + "expr": "sum(rate(redis_evicted_keys_total{instance=~\"$instance\"}[5m])) by (instance)", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "evicted", + "refId": "B", + "step": 240 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Expired / Evicted", + "tooltip": { + "msResolution": false, + "shared": true, + "sort": 0, + "value_type": "cumulative" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "editable": true, + "error": false, + "fill": 8, + "fillGradient": 0, + "grid": {}, + "gridPos": { + "h": 7, + "w": 12, + "x": 12, + "y": 21 + }, + "hiddenSeries": false, + "id": 14, + "isNew": true, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "connected", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 5, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": true, + "steppedLine": false, + "targets": [ + { + "expr": "topk(5, irate(redis_commands_total{instance=~\"$instance\"} [1m]))", + "format": "time_series", + "interval": "", + "intervalFactor": 2, + "legendFormat": "{{ cmd }}", + "metric": "redis_command_calls_total", + "refId": "A", + "step": 240 + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Command Calls / sec", + "tooltip": { + "msResolution": true, + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + 
"buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "${DS_PROMETHEUS}", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 7, + "w": 12, + "x": 0, + "y": 28 + }, + "hiddenSeries": false, + "id": 16, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "dataLinks": [] + }, + "percentage": false, + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "expr": "redis_connected_clients{instance=\"$instance\"}", + "format": "time_series", + "intervalFactor": 1, + "refId": "A" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Redis connected clients", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "refresh": "30s", + "schemaVersion": 21, + "style": "dark", + "tags": [ + "Redis", + "Datacoves" + ], + "templating": { + "list": [ + { + "current": { + "text": "Prometheus", + "value": "Prometheus" + }, + "hide": 0, + "includeAll": false, + "label": "Prometheus", + "multi": false, + "name": "DS_PROMETHEUS", + "options": [], + "query": "prometheus", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "allValue": null, + "current": {}, + "datasource": "$DS_PROMETHEUS", + "definition": "label_values(redis_up, namespace)", + "hide": 0, + "includeAll": false, + "label": "Namespace", + "multi": false, + "name": "namespace", + "options": [], + "query": "label_values(redis_up, namespace)", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$DS_PROMETHEUS", + "definition": "label_values(redis_up{namespace=\"$namespace\"}, pod)", + "hide": 0, + "includeAll": false, + "label": "Pod Name", + "multi": false, + "name": "pod_name", + "options": [], + "query": "label_values(redis_up{namespace=\"$namespace\"}, pod)", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + }, + { + "allValue": null, + "current": {}, + "datasource": "$DS_PROMETHEUS", + "definition": "label_values(redis_up{namespace=\"$namespace\", pod=\"$pod_name\"}, instance)", + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "instance", + "options": [], + "query": 
"label_values(redis_up{namespace=\"$namespace\", pod=\"$pod_name\"}, instance)", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "tagValuesQuery": "", + "tags": [], + "tagsQuery": "", + "type": "query", + "useTags": false + } + ] + }, + "time": { + "from": "now-24h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ] + } +} \ No newline at end of file diff --git a/scripts/observability/setup_observability_grafana_config.py b/scripts/observability/setup_observability_grafana_config.py new file mode 100644 index 00000000..65206c3b --- /dev/null +++ b/scripts/observability/setup_observability_grafana_config.py @@ -0,0 +1,282 @@ +import base64 +import json +from http import HTTPStatus +from pathlib import Path + +import requests +import urllib3 + +from lib.config import config as the +from lib.config_files import mkdir, write_yaml +from scripts import k8s_utils +from scripts.k8s_utils import kubectl_output +from scripts.observability.setup_observability_utils import get_grafana_orgs +from scripts.setup_core import gen_core_api_service_monitor as setup_metrics_on_api +from scripts.setup_core import ( + gen_core_flower_service_monitor as setup_metrics_on_flower, +) +from scripts.setup_core import setup_redis as setup_metrics_on_redis + +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + +NAMESPACE = "prometheus" +NAMESPACE_CORE = "core" +DEFAULT_RESOURCE = None +SERVICE_MONITOR_OUTDIR = None + + +def setup_grafana_config(): + global SERVICE_MONITOR_OUTDIR + SERVICE_MONITOR_OUTDIR = ( + the.PROMETHEUS_DIR / the.cluster_domain / "service_monitors" + ) + mkdir(SERVICE_MONITOR_OUTDIR) + print("Setting up grafana") + update_service_account_cluster() + grafana_update_main_org() + gen_grafana_dashboards() + grafana_orgs = get_grafana_orgs() + setup_grafana_orgs(grafana_orgs) + + +def update_service_account_cluster(): + print("Updating Grafana service account") + k8s_utils.wait_for_deployment(NAMESPACE_CORE, "api") + api_pod = k8s_utils.pod_for_deployment(NAMESPACE_CORE, "api") + run_in_api_pod = k8s_utils.cmd_runner_in_pod( + NAMESPACE_CORE, api_pod, container="api" + ) + username, password = get_grafana_admin_credentials() + data = { + "grafana": { + "username": username, + "password": password, + "token": get_grafana_token(sa_name="core-api"), + "description": "Service account for Grafana API", + } + } + json_data = json.dumps(data) + json_data_bytes = json_data.encode("utf-8") + json_data_b64 = base64.b64encode(json_data_bytes).decode("utf-8") + run_in_api_pod(f"./manage.py save_service_account --json-data-b64 {json_data_b64}") + + +def get_grafana_admin_credentials() -> tuple: + credential_secrets = kubectl_output( + f"-n {NAMESPACE} get secret grafana-admin-credentials " "-o jsonpath='{.data}'" + ) + credential_secrets = json.loads(credential_secrets.replace("'", "").strip()) + user = ( + base64.b64decode(credential_secrets["admin-user"]) + .decode("utf-8") + .replace("\n", "") + ) + password = ( + base64.b64decode(credential_secrets["admin-password"]) + .decode("utf-8") + .replace("\n", "") + ) + return user, password + + +def grafana_update_main_org(): + k8s_utils.wait_for_deployment(NAMESPACE, "prometheus-grafana") + + user, password = get_grafana_admin_credentials() + base_url = f"https://{user}:{password}@grafana.{the.cluster_domain}/api" + + r = 
requests.put( + f"{base_url}/orgs/1", + headers={"Content-Type": "application/json; charset=utf-8"}, + json={"name": "datacoves-main"}, + verify=False, + ) + if r.ok: + print("Main organization updated") + + else: + print("Update main organization failed:", r.text) + + # Delete Main org + r = requests.get(f"{base_url}/orgs/name/Main%20Org%2E", verify=False) + if r.ok: + org_id = r.json()["id"] + r = requests.delete(f"{base_url}/orgs/{org_id}", verify=False) + + +def gen_grafana_folder(folder_name: str): + token = get_grafana_token() + if not token: + print("Failed to generate token on Grafana") + return None + + folder_id = None + headers = {"Content-Type": "application/json; charset=utf-8"} + base_url = f"https://grafana.{the.cluster_domain}/api" + headers.update({"Authorization": f"Bearer {token}"}) + r = requests.get(f"{base_url}/folders", headers=headers, verify=False) + + if r.ok: + for folder in r.json(): + if folder["title"] == folder_name: + return folder["id"] + + if not folder_id: + r = requests.post( + f"{base_url}/folders", + headers=headers, + json={"title": folder_name}, + verify=False, + ) + if r.ok: + return r.json()["id"] + + else: + print(f"Failed to create the folder named {folder_name} on Grafana") + return None + + +def gen_grafana_dashboards(): + print("Generating Grafana dashboards") + global SERVICE_MONITOR_OUTDIR + if not k8s_utils.exists_namespace(ns=NAMESPACE): + print("Observability stack not installed") + return + + k8s_utils.wait_for_deployment(NAMESPACE, "prometheus-grafana") + if not k8s_utils.exists_resource("core", "servicemonitors", "redis"): + setup_metrics_on_redis() + + service_monitor_api = setup_metrics_on_api() + if service_monitor_api: + outdir = SERVICE_MONITOR_OUTDIR / "service_monitor_api.yaml" + write_yaml(outdir, service_monitor_api) + k8s_utils.kubectl(f"apply -f {SERVICE_MONITOR_OUTDIR}") + + service_monitor_flower = setup_metrics_on_flower() + if service_monitor_flower: + outdir = SERVICE_MONITOR_OUTDIR / "service_monitor_flower.yaml" + write_yaml(outdir, service_monitor_flower) + k8s_utils.kubectl(f"apply -f {SERVICE_MONITOR_OUTDIR}") + + folder_id = gen_grafana_folder("Datacoves") + if folder_id: + token = get_grafana_token() + if not token: + print("Failed to generate token on Grafana") + return + + headers = {"Content-Type": "application/json; charset=utf-8"} + base_url = f"https://grafana.{the.cluster_domain}/api" + headers.update({"Authorization": f"Bearer {token}"}) + load_grafana_dashboard(folder_id, base_url, headers) + + +def get_grafana_token(sa_name="sa-install") -> str: + user, password = get_grafana_admin_credentials() + headers = {"Content-Type": "application/json; charset=utf-8"} + base_url = f"https://{user}:{password}@grafana.{the.cluster_domain}/api" + token_name = f"{sa_name}-token" + + # validate if the service account exists + service_account_id = None + r = requests.get( + f"{base_url}/serviceaccounts/search?perpage=10&page=1&query={sa_name}", + verify=False, + ) + + if r.ok: + data_sa = r.json() + if data_sa["totalCount"] > 0: + service_account_id = data_sa["serviceAccounts"][0]["id"] + + # if the service account does not exist it's created + if not service_account_id: + r = requests.post( + f"{base_url}/serviceaccounts", + headers=headers, + json={"name": sa_name, "role": "Admin"}, + verify=False, + ) + if r.status_code == HTTPStatus.CREATED: + service_account_id = r.json()["id"] + else: + print("Failed to generate service account on Grafana:", r.text) + return None + + # clean tokens on the service account + r = 
requests.get( + f"{base_url}/serviceaccounts/{service_account_id}/tokens", verify=False + ) + if r.ok: + for token in r.json(): + if token["name"] == token_name: + r = requests.delete( + f"{base_url}/serviceaccounts/{service_account_id}/tokens/{token['id']}", + verify=False, + ) + + # create token on the service account + r = requests.post( + f"{base_url}/serviceaccounts/{service_account_id}/tokens", + headers=headers, + json={"name": token_name}, + verify=False, + ) + return r.json()["key"] if r.ok else None + + +def load_grafana_dashboard(folder_id: int, base_url: str, headers: dict): + path_list = Path( + the.DATACOVES_DIR / "scripts/observability/grafana/dashboards" + ).glob("**/*.json") + for path in path_list: + with open(path, "r") as f: + dashboard = None + try: + dashboard = json.loads(f.read()) + dashboard_payload = { + "dashboard": dashboard, + "folderId": folder_id, + "message": f"Changes made to release {the.config['release']}", + "overwrite": True, + } + + r = requests.post( + f"{base_url}/dashboards/db", + headers=headers, + json=dashboard_payload, + verify=False, + ) + if r.ok: + print( + f"Dashboard {dashboard['title']} created or updated successfully." + ) + + else: + print( + f"Dashboard {dashboard['title']} could not be created or updated:", + r.text, + ) + except Exception as e: + dashboard_name = dashboard["title"] if dashboard else "unknown" + print("Failed to install dashboard", dashboard_name, e) + + +def setup_grafana_orgs(orgs): + """Create orgs on grafana using their API""" + user, password = get_grafana_admin_credentials() + base_url = f"https://{user}:{password}@grafana.{the.cluster_domain}/api" + + for org in orgs: + r = requests.post( + f"{base_url}/orgs", + headers={"Content-Type": "application/json; charset=utf-8"}, + json={"name": org}, + verify=False, + ) + if r.ok: + print(f"Organization {org} created") + else: + if r.json()["message"] != "Organization name taken": + print(f"Organization {org} creation error: {r.text}") diff --git a/scripts/observability/setup_observability_main.py b/scripts/observability/setup_observability_main.py new file mode 100644 index 00000000..1ca7acd1 --- /dev/null +++ b/scripts/observability/setup_observability_main.py @@ -0,0 +1,46 @@ +import questionary +from questionary.prompts.common import Choice +from rich import print as rprint + +from lib.config import config as the + +from .setup_observability_grafana_config import setup_grafana_config +from .setup_observability_stack import setup_stack + +NAMESPACE = "prometheus" +DEFAULT_RESOURCE = None + + +def setup(cluster_domain, automate: bool = False): + # TODO: improve this + params_yaml_path = f"config/{cluster_domain}/cluster-params.yaml" + the.load_cluster_params(params_yaml_path) + if not the.config["observability_stack"]: + rprint( + "[yellow]Observability stack is disabled in the [bold]cluster-params.yaml[/bold]" + ) + return + + if automate: + selected = ["observability", "grafana_config"] + + else: + selected = questionary.checkbox( + "Uncheck the tasks you want to skip", + choices=[ + Choice( + "Setup Observability Stack", value="observability", checked=True + ), + Choice( + "Setup Grafana Configuration", value="grafana_config", checked=False + ), + ], + ).ask() + + if not selected: + return + + if "observability" in selected: + setup_stack(cluster_domain) + if "grafana_config" in selected: + setup_grafana_config() diff --git a/scripts/observability/setup_observability_stack.py b/scripts/observability/setup_observability_stack.py new file mode 100644 index 
00000000..b779c269 --- /dev/null +++ b/scripts/observability/setup_observability_stack.py @@ -0,0 +1,1022 @@ +import base64 +import json + +import yaml + +from lib.config import config as the +from lib.config_files import emit_yamls, load_text_file, load_yaml, mkdir, write_yaml +from lib.tools import parse_image_uri +from scripts import k8s_utils +from scripts.k8s_utils import helm, kubectl +from scripts.observability.setup_observability_utils import ( + gen_image_spec as _gen_image_spec, +) +from scripts.observability.setup_observability_utils import ( + get_grafana_orgs as _get_grafana_orgs, +) +from scripts.observability.setup_observability_utils import ( + get_grafana_postgres_credentials as _get_grafana_postgres_credentials, +) +from scripts.observability.setup_observability_utils import ( + get_resources as _get_resources, +) +from scripts.setup_core import get_api_pod + +NAMESPACE = "prometheus" +MINIO_BUCKETS_NAMES = { + "loki": "loki", + "mimir-tsdb": "mimir-tsdb", +} +MINIO_USERNAME = "admin" + + +def setup_stack(cluster_domain: str): + gen_prometheus_yaml(cluster_domain) + setup_minio() + setup_grafana(cluster_domain) + setup_grafana_agent() + setup_mimir() + setup_cortex_tenant() + setup_loki() + + +def setup_grafana(cluster_domain: str): + oidc_data = _setup_oidc_data() + grafana_orgs = _get_grafana_orgs() + grafana_orgs.append("datacoves-main") + grafana_db = _get_grafana_postgres_credentials() + + idp_provider = oidc_data["idp_provider"] + idp_client_id = oidc_data["idp_client_id"] + idp_client_secret = oidc_data["idp_client_secret"] + idp_scopes = oidc_data["idp_scopes"] + idp_provider_url = oidc_data["idp_provider_url"] + + patch = {"imagePullSecrets": [{"name": the.config["docker_config_secret_name"]}]} + p = json.dumps(patch, separators=(",", ":")) + kubectl(f"patch -n prometheus serviceaccount default -p {p}") + helm( + "repo add prometheus-community https://prometheus-community.github.io/helm-charts" + ) + helm("repo add grafana https://grafana.github.io/helm-charts") + helm("repo update prometheus-community") + helm("repo update grafana") + + data = { + "defaultRules": {"create": False}, + "global": { + "imagePullSecrets": [{"name": the.config["docker_config_secret_name"]}] + }, + "server": {"image": {"repository": None, "tag": None}}, + "grafana": { + # FIXME: Stop sending passwords in values.yaml and remove next line + "assertNoLeakedSecrets": False, + "ingress": { + "enabled": False, + }, + "persistence": { + "enabled": True, + "accessModes": ["ReadWriteOnce"], + "size": "40Gi" if not the.cluster_is_localhost() else "1Gi", + }, + "image": _gen_image_spec("grafana/grafana"), + "nodeSelector": the.VOLUMED_NODE_SELECTOR, + "grafana.ini": { + # Uncomment this line whenever you need to debug grafana (default level is 'info') + # "log.console": {"level": "debug"}, + "grafana_net": {"url": "https://grafana.net"}, + "server": { + "domain": f"grafana.{the.cluster_domain}", + "root_url": "https://%(domain)s", + }, + "auth": { + "oauth_auto_login": True, + "disable_login_form": True, + "disable_signout_menu": False, + "oauth_allow_insecure_email_lookup": True, + "oauth_skip_org_role_update_sync": False, + }, + "auth.generic_oauth": { + "enabled": True, + "icon": "signin", + "empty_scopes": False, + "allowed_domains": None, + "allow_sign_up": True, + "auth_style": None, + "name": idp_provider, + "client_id": idp_client_id, + "client_secret": idp_client_secret, + "scopes": " ".join(idp_scopes), + "auth_url": f"{idp_provider_url}/auth/authorize/", + "token_url": 
"http://core-api-svc.core.svc/auth/token/", + "api_url": f"{idp_provider_url}/auth/userinfo/", + "email_attribute_path": "email", + "role_attribute_path": ( + "contains(permissions[*], '*|write') && 'GrafanaAdmin' ||" + " contains(permissions[*], 'configuration|write') && 'Admin' ||" + " contains(permissions[*], 'dashboards|write') && 'Editor' ||" + " contains(permissions[*], 'dashboards|read') && 'Viewer'" + ), + "role_attribute_strict": True, + "allow_assign_grafana_admin": True, + "skip_org_role_sync": False, + "org_attribute_path": "groups", + "org_mapping": " ".join( + [f"{org}:{org}:Viewer" for org in grafana_orgs] + ), + }, + "database": grafana_db, + "users": { + "auto_assign_org": False, + # "default_home_dashboard_uid": "datacoves_home", + # "home_page": "d/datacoves_home/datacoves-home", + }, + "security": {"allow_embedding": True}, + }, + "initChownData": {"image": _gen_image_spec("library/busybox")}, + "sidecar": { + "image": _gen_image_spec("kiwigrid/k8s-sidecar"), + "resources": _get_resources(resource_name="grafana_sidecar"), + "dashboards": { + "enabled": True, + "label": "grafana_dashboard", + "labelValue": None, + "searchNamespace": "ALL", + "resource": "configmap", + "annotations": {}, + "multicluster": { + "global": {"enabled": False}, + "etcd": {"enabled": False}, + }, + "provider": {"allowUiUpdates": False}, + }, + "datasources": { + "enabled": True, + "defaultDatasourceEnabled": True, + "isDefaultDatasource": False, + "annotations": {}, + "createPrometheusReplicasDatasources": False, + "label": "grafana_datasource", + # "url": "http://mimir-nginx/prometheus", + "alertmanager": {"enabled": False}, + "prune": True, + }, + }, + "resources": _get_resources(resource_name="grafana"), + "admin": {"existingSecret": "grafana-admin-credentials"}, + "testFramework": { + "image": _gen_image_spec("bats/bats"), + }, + }, + "alertmanager": { + "enabled": True, + "alertmanagerSpec": { + "image": _gen_image_spec("quay.io/prometheus/alertmanager"), + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources(resource_name="prometheus_alertmanager"), + }, + "config": { + "global": {"resolve_timeout": "10m"}, + "receivers": [ + { + "name": "notifications", + "webhook_configs": [ + { + "send_resolved": True, + "url": "http://core-api-svc.core.svc/api/alerts", + } + ], + } + ], + "route": { + "group_by": ["cluster", "alertname", "namespace"], + "group_interval": "5m", + "group_wait": "30s", + "receiver": "notifications", + "repeat_interval": "4h", + "routes": [{"receiver": "notifications"}], + }, + }, + }, + "kube-state-metrics": { + "image": _gen_image_spec( + "registry.k8s.io/kube-state-metrics/kube-state-metrics" + ), + "extraArgs": [get_extra_args()], + "resources": _get_resources(resource_name="kube_state_metrics"), + }, + "prometheus-node-exporter": { + "image": _gen_image_spec("quay.io/prometheus/node-exporter"), + # for some reason we need to define the image pull secrets for node exporter + "serviceAccount": { + "imagePullSecrets": [{"name": the.config["docker_config_secret_name"]}] + }, + "resources": _get_resources(resource_name="prometheus_node_exporter"), + }, + "prometheus": { + "prometheusSpec": { + "resources": _get_resources(resource_name="prometheus"), + "image": _gen_image_spec("quay.io/prometheus/prometheus"), + "storageSpec": {"emptyDir": {}}, + "enableAdminAPI": False, + "enableRemoteWriteReceiver": True, + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "remoteWrite": [{"url": "http://cortex-tenant:8080/push"}], + "additionalScrapeConfigs": [ + { + 
"job_name": "loki", + "scheme": "http", + "static_configs": [ + {"targets": ["loki-loki-distributed-ingester:3100"]} + ], + } + ], + "retention": "10d", # How long to retain metrics + "retentionSize": "99GB", # Maximum size of metrics + } + }, + "prometheusOperator": { + "image": _gen_image_spec("quay.io/prometheus-operator/prometheus-operator"), + "prometheusConfigReloader": { + "image": _gen_image_spec( + "quay.io/prometheus-operator/prometheus-config-reloader" + ) + }, + "admissionWebhooks": { + "enabled": True, + "patch": { + "image": _gen_image_spec( + "registry.k8s.io/ingress-nginx/kube-webhook-certgen" + ) + }, + }, + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources(resource_name="prometheus_operator"), + }, + "additionalPrometheusRulesMap": { + "datacoves-rules": load_yaml( + "scripts/observability/data/grafana-rules.yaml" + ), + }, + } + + if grafana_db["host"] == "grafana-postgres-postgresql.prometheus": + setup_database() + + values_file = ".generated/prometheus-values.yaml" + write_yaml(values_file, data) + helm( + f"-n {NAMESPACE} upgrade --install prometheus prometheus-community/kube-prometheus-stack --version 65.1.1", + "-f", + values_file, + ) + + +def get_extra_args(): + pods_labels = ",".join( + [ + "app", + "airflow-worker", + "airbyte", + "dag_id", + "run_id", + "task_id", + "k8s.datacoves.com/kanikoBuildId", + "k8s.datacoves.com/kanikoImage", + "k8s.datacoves.com/kanikoProfileId", + "k8s.datacoves.com/kanikoProfileName", + "k8s.datacoves.com/kanikoEnvSlugs", + ] + ) + + namespace_labels = ",".join( + [ + "k8s.datacoves.com/account", + "k8s.datacoves.com/environment-type", + "k8s.datacoves.com/project", + "k8s.datacoves.com/release", + "k8s.datacoves.com/workspace", + ] + ) + + node_labels = ",".join(the.NODE_SELECTORS_KEYS) + + metric_arg = ( + f"--metric-labels-allowlist=" + f"pods=[{pods_labels}]," + f"namespaces=[{namespace_labels}]," + f"nodes=[{node_labels}]" + ) + + return metric_arg + + +def setup_mimir(): + values = { + "image": _gen_image_spec("grafana/mimir", add_pull_secret=True, complete=False), + "compactor": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_compactor"), + "persistentVolume": { + "enabled": False, + }, + }, + "distributor": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_distributor"), + }, + "ingester": { + "replicas": 2, + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_ingester"), + "zoneAwareReplication": { + "enabled": False, + }, + "persistentVolume": {"enabled": False, "size": "10Gi"}, + }, + "overrides_exporter": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_overrides_exporter"), + }, + "querier": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_querier"), + }, + "query_frontend": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_query_frontend"), + }, + "query_scheduler": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_query_scheduler"), + }, + "ruler": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_ruler"), + }, + "alertmanager": { + "enabled": False, + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_alertmanager"), + }, + "store_gateway": { + "replicas": 1, + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_store_gateway"), + "zoneAwareReplication": { + "enabled": 
False, + }, + "persistentVolume": { + "enabled": False, + }, + }, + "rollout_operator": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "image": _gen_image_spec("grafana/rollout-operator", complete=False), + "imagePullSecrets": [{"name": the.config["docker_config_secret_name"]}], + "resources": _get_resources("mimir_rollout_operator"), + }, + "nginx": { + "replicas": 1, + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("mimir_gateway"), + "image": _gen_image_spec( + "nginxinc/nginx-unprivileged", add_pull_secret=True + ), + }, + "metaMonitoring": { + "dashboards": { + "enabled": False, + }, + "prometheusRule": { + "enabled": False, + "mimirAlerts": False, + "mimirRules": False, + }, + }, + "minio": { + "enabled": False, + # "rootUser": "changeme", + # "rootPassword": "changeme", + "persistence": { + "size": "100Gi" if not the.cluster_is_localhost() else "1Gi" + }, + "image": _gen_image_spec("quay.io/minio/minio", complete=False), + "mcImage": _gen_image_spec("quay.io/minio/mc", complete=False), + "imagePullSecrets": [{"name": the.config["docker_config_secret_name"]}], + "nodeSelector": the.VOLUMED_NODE_SELECTOR, + "resources": _get_resources("minio"), + }, + "mimir": { + "structuredConfig": { + "multitenancy_enabled": True, + "tenant_federation": { + "enabled": True, + }, + "limits": { + "ingestion_rate": 600000, + "ingestion_burst_size": 10000, + "compactor_blocks_retention_period": "10d", + }, + "blocks_storage": { + "tsdb": {"retention_period": "240h"}, # 10 days + "backend": "s3", + "s3": { + "endpoint": "minio:9000", + "bucket_name": MINIO_BUCKETS_NAMES["mimir-tsdb"], + "access_key_id": MINIO_USERNAME, + "secret_access_key": the.config["grafana"]["loki"]["password"], + "insecure": True, + }, + }, + }, + "runtimeConfig": {"distributor_limits": {"max_ingestion_rate": 600000}}, + }, + } + + values_file = ".generated/grafana-mimir-values.yaml" + write_yaml(values_file, values) + + # helm -n prometheus uninstall mimir + helm( + f"-n {NAMESPACE} upgrade --install mimir grafana/mimir-distributed --version 5.4.1", + "-f", + values_file, + ) + + +def setup_cortex_tenant(): + values = { + "image": _gen_image_spec( + "ghcr.io/blind-oracle/cortex-tenant", complete=False, add_pull_secret=True + ), + "replicas": 3, + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources("cortex_tenant"), + "config": { + "target": "http://mimir-nginx/api/v1/push", + "tenant": {"label": "namespace"}, + "log_response_errors": True, + }, + "envs": [ + {"name": "CT_MAX_CONNS_PER_HOST", "value": 300}, + {"name": "CT_LOG_RESPONSE_ERRORS", "value": False}, + ], + "serviceMonitor": { + "enabled": False, + "labels": {"release": NAMESPACE}, + }, + } + + values_file = ".generated/cortex-tenant-values.yaml" + write_yaml(values_file, values) + + helm("repo add cortex-tenant https://blind-oracle.github.io/cortex-tenant") + helm( + f"-n {NAMESPACE} upgrade --install cortex-tenant cortex-tenant/cortex-tenant --version 0.6.0", + "-f", + values_file, + ) + + +def setup_database(): + grafana_database_values_file = ".generated/grafana-database-values.yaml" + grafana_database_data = load_yaml( + "scripts/observability/data/grafana-database-values.yaml" + ) + grafana_database_data.update( + { + "auth": { + "postgresPassword": the.config["grafana"]["postgres_password"], + "database": "grafana", + }, + "image": _gen_image_spec("bitnami/postgresql", add_pull_secret=True), + "primary": { + "nodeSelector": the.VOLUMED_NODE_SELECTOR, + "resources": _get_resources("postgresql"), + }, + } + ) + + 
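+    # Illustrative sketch of the merged values handed to the bitnami/postgresql
+    # chart below (the remaining defaults come from
+    # scripts/observability/data/grafana-database-values.yaml):
+    #
+    #   auth:
+    #     postgresPassword: <the.config["grafana"]["postgres_password"]>
+    #     database: grafana
+    #   image: <bitnami/postgresql image spec, with pull secret>
+    #   primary:
+    #     nodeSelector: <VOLUMED_NODE_SELECTOR>
+    #     resources: <get_resources("postgresql")>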
write_yaml(grafana_database_values_file, grafana_database_data) + helm( + f"-n {NAMESPACE} upgrade --install grafana-postgres bitnami/postgresql --version 12.4.2", + "-f", + grafana_database_values_file, + ) + + +def setup_grafana_agent(): + """Installs the grafana agent. + The grafana agent is a tool that can be used to configure and manage grafana + instances. In this scenario we use it to install the eventhandler integration. + """ + + config_reloader_image = the.docker_image_name_and_tag( + "ghcr.io/jimmidyson/configmap-reload" + ) + cr_registry, cr_repository, cr_tag = parse_image_uri(config_reloader_image) + + grafana_agent_image = the.docker_image_name_and_tag("grafana/agent") + ga_registry, ga_repository, ga_tag = parse_image_uri(grafana_agent_image) + + # https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/eventhandler-config/ + # sample query: {agent_hostname="eventhandler"} |= `` + config = { + "server": {"log_level": "info"}, + "integrations": { + "eventhandler": {"cache_path": "/etc/eventhandler/eventhandler.cache"} + }, + "logs": { + "configs": [ + { + "name": "default", + "clients": [ + { + "url": "http://loki-loki-distributed-gateway:80/loki/api/v1/push", + "tenant_id": "core", + } + ], + "positions": {"filename": "/tmp/positions0.yaml"}, + } + ] + }, + } + + data = { + "agent": { + "mode": "static", + "configMap": { + "content": yaml.dump(config), + }, + # https://grafana.com/docs/agent/latest/static/configuration/integrations/integrations-next/ + "extraArgs": ["--enable-features", "integrations-next"], + "resources": _get_resources(resource_name="grafana_agent"), + }, + "configReloader": { + "image": { + "registry": cr_registry, + "repository": cr_repository, + "tag": cr_tag, + }, + "resources": _get_resources(resource_name="grafana_config_reloader"), + }, + "image": { + "registry": ga_registry, + "repository": ga_repository, + "tag": ga_tag, + "pullSecrets": [{"name": the.config["docker_config_secret_name"]}], + }, + } + values_file = ".generated/grafana-agent-values.yaml" + write_yaml(values_file, data) + helm( + f"-n {NAMESPACE} upgrade --install grafana-agent grafana/grafana-agent --version 0.42.0", + "-f", + values_file, + ) + + +def setup_minio(): + """Installs minio in the cluster + Minio is needed by Loki as an S3-compatible store + """ + + minio_values_file = ".generated/minio-values.yaml" + default_bucket = ",".join(list(MINIO_BUCKETS_NAMES.values())) + minio_data = { + "auth": { + "rootUser": MINIO_USERNAME, + "rootPassword": the.config["grafana"]["loki"]["password"], + }, + "defaultBuckets": default_bucket, + "persistence": { + "enabled": True, + "size": "100Gi" if not the.cluster_is_localhost() else "1Gi", + }, + "nodeSelector": the.VOLUMED_NODE_SELECTOR, + "image": _gen_image_spec("bitnami/minio", add_pull_secret=True), + "resources": _get_resources(resource_name="minio"), + } + write_yaml(minio_values_file, minio_data) + helm( + f"-n {NAMESPACE} upgrade --install minio bitnami/minio --version 11.x.x", + "-f", + minio_values_file, + ) + + +def get_loki_storage_config(): + # https://grafana.com/docs/loki/latest/storage/#aws-deployment-s3-single-store + + storage_provider = the.config["grafana"].get("loki", {}).get("provider") + storage_config = { + "boltdb_shipper": { + "shared_store": "s3", + }, + "tsdb_shipper": { + "active_index_directory": "/var/loki/tsdb-index", + "cache_location": "/var/loki/tsdb-cache", + # Can be increased for faster performance over longer query periods, uses more disk space + "cache_ttl": "24h", 
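+            # End of the provider-independent shipper settings; the aws / azure /
+            # in-cluster MinIO specifics are merged into storage_config in the
+            # branches below.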
+ }, + } + if storage_provider == "aws": + access_key_id = the.config["grafana"]["loki"]["access_key"] + secret_access_key = the.config["grafana"]["loki"]["secret_key"] + region = the.config["grafana"]["loki"]["region"] + bucket = the.config["grafana"]["loki"]["bucket"] + + object_store = "aws" + storage_config.update( + { + "aws": { + "bucketnames": bucket, + "access_key_id": access_key_id, + "secret_access_key": secret_access_key, + "region": region, + }, + } + ) + + elif storage_provider == "azure": + object_store = "azure" + storage_config.update( + { + "azure": { + "account_name": the.config["grafana"]["loki"]["account_name"], + "account_key": the.config["grafana"]["loki"]["account_key"], + "container_name": the.config["grafana"]["loki"]["container_name"], + "endpoint_suffix": the.config["grafana"]["loki"]["endpoint_suffix"], + "request_timeout": "0", + }, + "boltdb_shipper": { + "shared_store": "azure", + }, + } + ) + + else: + bucket = MINIO_BUCKETS_NAMES["loki"] + password = the.config["grafana"]["loki"]["password"] + object_store = "aws" + storage_config.update( + { + "aws": { + "s3": f"http://{MINIO_USERNAME}:{password}@minio:9000/{bucket}", + "endpoint": "http://minio:9000", + "s3forcepathstyle": True, + }, + } + ) + + return object_store, storage_config + + +def setup_loki(): + loki_values_file = ".generated/loki-values.yaml" + object_store, storage_config = get_loki_storage_config() + gl_spec_image = _gen_image_spec("grafana/loki") + + loki_data = { + "enabled": True, + "imagePullSecrets": [{"name": the.config["docker_config_secret_name"]}], + "ingester": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "image": gl_spec_image, + "resources": _get_resources(resource_name="loki_ingester"), + }, + "compactor": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "image": gl_spec_image, + "enabled": True, + "resources": _get_resources(resource_name="loki_compactor"), + }, + "distributor": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "image": gl_spec_image, + "resources": _get_resources(resource_name="loki_distributor"), + }, + "gateway": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "image": _gen_image_spec("nginxinc/nginx-unprivileged"), + "resources": _get_resources(resource_name="loki_gateway"), + "affinity": None, + }, + "indexGateway": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "image": gl_spec_image, + "enabled": False, + "resources": _get_resources(resource_name="loki_compactor"), + }, + "querier": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "image": gl_spec_image, + "resources": _get_resources(resource_name="loki_querier"), + }, + "queryFrontend": { + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "image": gl_spec_image, + "replicas": 2, # https://grafana.com/docs/loki/latest/get-started/components/#query-frontend + "maxUnavailable": 1, + "resources": _get_resources(resource_name="loki_query_frontend"), + "affinity": None, + }, + "image": gl_spec_image, + "serviceAccount": { + "imagePullSecrets": [{"name": the.config["docker_config_secret_name"]}] + }, + "serviceMonitor": {"enabled": True}, + "loki": { + "structuredConfig": { + "auth_enabled": True, + "querier": {"multi_tenant_queries_enabled": True}, + "ruler": { + "remote_write": { + "enabled": True, + "client": { + "url": "http://prometheus-kube-prometheus-prometheus.prometheus:9090/api/v1/write" + }, + }, + # patched because of bug on how the wal dir is set in the helm chart + # https://github.com/grafana/loki/issues/9351#issuecomment-1535027620 + "wal": {"dir": "/var/loki/ruler-wal"}, + "alertmanager_url": 
"http://prometheus-kube-prometheus-alertmanager:9093", + }, + "ingester": { + # Disable chunk transfer which is not possible with statefulsets + # and unnecessary for boltdb-shipper + "max_transfer_retries": 0, + "chunk_idle_period": "30m", + "chunk_target_size": 1572864, + "max_chunk_age": "1h", + }, + "compactor": {"retention_enabled": True}, + "schema_config": { + "configs": [ + { + "from": "2020-09-07", + "store": "boltdb-shipper", + "object_store": object_store, + "schema": "v11", + "index": {"prefix": "loki_index_", "period": "24h"}, + }, + { + "from": "2024-10-01", + "store": "tsdb", + "object_store": object_store, + "schema": "v13", + "index": {"prefix": "loki_index_", "period": "24h"}, + }, + ] + }, + "storage_config": storage_config, + "limits_config": { + # promtail started to fail, so increased streams per user based on this comments: + # https://github.com/grafana/loki/issues/3335 + "max_global_streams_per_user": 0, + "retention_period": "240h", # 10 days + }, + "chunk_store_config": {"max_look_back_period": "240h"}, + } + }, + "ruler": { + "enabled": True, + "image": gl_spec_image, + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "resources": _get_resources(resource_name="loki_ruler"), + "directories": { + "fake": { + "rules.yml": load_text_file( + "scripts/observability/data/loki-rules.yaml" + ) + } + }, + }, + } + + write_yaml(loki_values_file, loki_data) + helm( + f"-n {NAMESPACE} upgrade --install loki grafana/loki-distributed --version 0.79.3", + "-f", + loki_values_file, + ) + gen_promtail() + + +def gen_promtail(): + snippets = """\ + pipelineStages: + - cri: {} + - tenant: + source: namespace + """ + promtail_data = { + "config": { + "logLevel": "info", + "serverPort": 3101, + "clients": [ + { + "url": "http://loki-loki-distributed-gateway/loki/api/v1/push", + "tenant_id": "core", + }, + ], + "snippets": yaml.safe_load(snippets), + }, + "imagePullSecrets": [{"name": the.config["docker_config_secret_name"]}], + "image": _gen_image_spec("grafana/promtail"), + "resources": _get_resources(resource_name="promtail"), + } + + promtail_values_file = ".generated/promtail-values.yaml" + write_yaml(promtail_values_file, promtail_data) + helm( + f"-n {NAMESPACE} upgrade --install promtail grafana/promtail --version 6.16.5", + "-f", + promtail_values_file, + ) + + +def gen_prometheus_yaml(cluster_domain): + params_yaml_path = f"config/{cluster_domain}/cluster-params.yaml" + the.load_cluster_params(params_yaml_path) + + out_dir = the.PROMETHEUS_DIR / the.cluster_domain + mkdir(out_dir) + + files = { + "prometheus.yaml": { + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": NAMESPACE, + "labels": { + "k8s.datacoves.com/namespace": "prometheus", + }, + }, + }, + "ingress.yaml": _gen_ingress(), + "grafana-admin-credentials-secret.yaml": _gen_grafana_admin_credentials(), + "kustomization.yaml": { + "apiVersion": "kustomize.config.k8s.io/v1beta1", + "kind": "Kustomization", + "namespace": NAMESPACE, + "resources": [ + "prometheus.yaml", + "ingress.yaml", + "grafana-admin-credentials-secret.yaml", + ], + }, + } + + if the.config["generate_docker_secret"]: + files["kustomization.yaml"]["secretGenerator"] = [ + { + "name": the.config["docker_config_secret_name"], + "type": "kubernetes.io/dockerconfigjson", + "files": [".dockerconfigjson=docker-config.secret.json"], + "options": {"disableNameSuffixHash": True}, + } + ] + files["docker-config.secret.json"] = load_text_file( + the.SECRETS_DIR / "docker-config.secret.json" + ) + + emit_yamls(out_dir, files) + kubectl(f"apply 
-k {out_dir}") + + +def _gen_grafana_admin_credentials() -> dict: + return { + "apiVersion": "v1", + "kind": "Secret", + "metadata": {"name": "grafana-admin-credentials"}, + "type": "Opaque", + "data": { + "admin-user": base64.b64encode("adm-datacoves".encode("utf-8")).decode( + "utf-8" + ), + "admin-password": base64.b64encode( + the.config["grafana"]["admin_password"].encode("utf-8") + ).decode("utf-8"), + }, + } + + +def _setup_oidc_data(): + api_pod = get_api_pod() + run_in_api_pod = k8s_utils.cmd_runner_in_pod("core", api_pod, container="api") + subdomain = "grafana" + name = subdomain + path = "/login/generic_oauth" + + raw_data = run_in_api_pod( + f"./manage.py generate_cluster_oidc --name {name} --subdomain {subdomain} --path {path}", + capture_output=True, + ) + try: + # It will be the last line of output + json_data = raw_data.stdout.strip().split(b"\n")[-1] + data = json.loads(json_data) + return data + + except json.decoder.JSONDecodeError as e: + print("Got an error while processing the following output:", e) + print(raw_data.stdout) + raise + + +def _gen_ingress(): + cert_manager_issuer = the.config.get("cert_manager_issuer") + + rules = [ + { + "host": f"grafana.{the.cluster_domain}", + "http": { + "paths": [ + { + "path": "/", + "pathType": "Prefix", + "backend": { + "service": { + "name": "prometheus-grafana", + "port": {"number": 80}, + } + }, + } + ] + }, + }, + { + "host": f"minio-observability.{the.cluster_domain}", + "http": { + "paths": [ + { + "path": "/", + "pathType": "Prefix", + "backend": { + "service": { + "name": "minio", + "port": {"number": 9001}, + } + }, + } + ] + }, + }, + ] + + tls = [] + if cert_manager_issuer: + for rule in rules: + host = rule["host"] + tls.append( + { + "hosts": [host], + "secretName": host.replace(".", "-"), + } + ) + else: + root_tls = the.config["root_tls_secret_name"] + wildcard_tls = the.config["wildcard_tls_secret_name"] + if root_tls and wildcard_tls: + for rule in rules: + host = rule["host"] + tls.append( + { + "hosts": [host], + "secretName": ( + root_tls if host == the.cluster_domain else wildcard_tls + ), + } + ) + + if the.config["ssl_redirect"]: + annotations = { + "nginx.ingress.kubernetes.io/force-ssl-redirect": True, + "nginx.ingress.kubernetes.io/ssl-redirect": True, + } + else: + annotations = {} + + dns_url = the.config.get("external_dns_url") + if dns_url: + annotations["external-dns.alpha.kubernetes.io/alias"] = True + annotations["external-dns.alpha.kubernetes.io/target"] = dns_url + + if cert_manager_issuer: + annotations["cert-manager.io/cluster-issuer"] = cert_manager_issuer + + return { + "apiVersion": "networking.k8s.io/v1", + "kind": "Ingress", + "metadata": { + "name": "datacoves-core-ingress", + "annotations": annotations, + }, + "spec": { + "rules": rules, + "tls": tls, + "ingressClassName": "nginx", + }, + } diff --git a/scripts/observability/setup_observability_utils.py b/scripts/observability/setup_observability_utils.py new file mode 100644 index 00000000..9786e87f --- /dev/null +++ b/scripts/observability/setup_observability_utils.py @@ -0,0 +1,204 @@ +import copy +import json + +from lib.config import config as the +from lib.tools import parse_image_uri +from scripts import k8s_utils +from scripts.setup_core import get_api_pod + + +def get_grafana_orgs() -> list: + """Retrieve orgs from core api""" + api_pod = get_api_pod() + run_in_api_pod = k8s_utils.cmd_runner_in_pod("core", api_pod, container="api") + raw_data = run_in_api_pod("./manage.py generate_account_slugs", capture_output=True) + 
return json.loads(raw_data.stdout) + + +def get_grafana_postgres_credentials() -> list: + """Retrieve credentials for postgres""" + api_pod = get_api_pod() + run_in_api_pod = k8s_utils.cmd_runner_in_pod("core", api_pod, container="api") + raw_data = run_in_api_pod( + "./manage.py cluster_config --config-name service_account.postgres_grafana", + capture_output=True, + ) + data = raw_data.stdout + if data: + try: + # It will be the last line of output + json_data = raw_data.stdout.strip().split(b"\n")[-1] + data = json.loads(json_data) + except json.decoder.JSONDecodeError as e: + print("Got an error while processing the following output:", e) + print(data.stdout) + raise + if data: + if "description" in data: + del data["description"] + + data.update({"type": "postgres"}) + return data + + return { + "type": "postgres", + "host": "grafana-postgres-postgresql.prometheus", + "name": "grafana", + "user": "postgres", + "password": the.config["grafana"]["postgres_password"], + } + + +def gen_image_spec(image, complete=True, add_pull_secret=False): + _image = the.docker_image_name_and_tag(image) + if complete: + registry, repository, tag = parse_image_uri(_image) + spec = {"registry": registry, "repository": repository, "tag": tag} + else: + repository, tag = _image.split(":") + spec = {"repository": repository, "tag": tag} + + if add_pull_secret: + spec.update({"pullSecrets": [the.config["docker_config_secret_name"]]}) + + return spec + + +def get_resources(resource_name: str) -> dict: + default_resources = { + "grafana": { + "requests": {"cpu": "1", "memory": "1Gi"}, + "limits": {"cpu": "1.5", "memory": "4Gi"}, + }, + "prometheus": { + "requests": {"cpu": "1", "memory": "1Gi"}, + "limits": {"cpu": "1.5", "memory": "7Gi"}, + }, + "prometheus_alertmanager": { + "requests": {"cpu": "50m", "memory": "100Mi"}, + "limits": {"cpu": "50m", "memory": "100Mi"}, + }, + "kube_state_metrics": { + "requests": {"cpu": "25m", "memory": "100Mi"}, + "limits": {"cpu": "50m", "memory": "300Mi"}, + }, + "prometheus_node_exporter": { + "requests": {"cpu": "25m", "memory": "25Mi"}, + "limits": {"cpu": "50m", "memory": "50Mi"}, + }, + "prometheus_operator": { + "requests": {"cpu": "50m", "memory": "100Mi"}, + "limits": {"cpu": "100m", "memory": "300Mi"}, + }, + "grafana_sidecar": { + "requests": {"cpu": "25m", "memory": "25Mi"}, + "limits": {"cpu": "50m", "memory": "100Mi"}, + }, + "grafana_config_reloader": { + "requests": {"cpu": "25m", "memory": "20Mi"}, + "limits": {"cpu": "50m", "memory": "40Mi"}, + }, + "grafana_agent": { + "requests": {"cpu": "25m", "memory": "50Mi"}, + "limits": {"cpu": "50m", "memory": "200Mi"}, + }, + "minio": { + "requests": {"cpu": "200m", "memory": "2Gi"}, + "limits": {"cpu": "500m", "memory": "3Gi"}, + }, + "postgresql": { + "requests": {"cpu": "200m", "memory": "1Gi"}, + "limits": {"cpu": "1", "memory": "2Gi"}, + }, + "loki_ingester": { + "requests": {"cpu": "50m", "memory": "1Gi"}, + "limits": {"cpu": "100m", "memory": "2Gi"}, + }, + "loki_compactor": { + "requests": {"cpu": "50m", "memory": "50Mi"}, + "limits": {"cpu": "50m", "memory": "100Mi"}, + }, + "loki_distributor": { + "requests": {"cpu": "50m", "memory": "100Mi"}, + "limits": {"cpu": "100m", "memory": "400Mi"}, + }, + "loki_gateway": { + "requests": {"cpu": "50m", "memory": "20Mi"}, + "limits": {"cpu": "100m", "memory": "200Mi"}, + }, + "loki_querier": { + "requests": {"cpu": "100m", "memory": "512Mi"}, + "limits": {"cpu": "500m", "memory": "1512Mi"}, + }, + "loki_query_frontend": { + "requests": {"cpu": "100m", "memory": 
"200Mi"}, + "limits": {"cpu": "200m", "memory": "512Mi"}, + }, + "loki_ruler": { + "requests": {"cpu": "50m", "memory": "100Mi"}, + "limits": {"cpu": "100m", "memory": "200Mi"}, + }, + "promtail": { + "requests": {"cpu": "50m", "memory": "50Mi"}, + "limits": {"cpu": "100m", "memory": "100Mi"}, + }, + "mimir_compactor": { + "requests": {"cpu": "50m", "memory": "200Mi"}, + "limits": {"cpu": "100m", "memory": "800Mi"}, + }, + "mimir_distributor": { + "requests": {"cpu": "100m", "memory": "200Mi"}, + "limits": {"cpu": "1", "memory": "1.5Gi"}, + }, + "mimir_ingester": { + "requests": {"cpu": "100m", "memory": "500Mi"}, + "limits": {"cpu": "1", "memory": "4Gi"}, + }, + "mimir_overrides_exporter": { + "requests": {"cpu": "50m", "memory": "50Mi"}, + "limits": {"cpu": "50m", "memory": "100Mi"}, + }, + "mimir_querier": { + "requests": {"cpu": "200m", "memory": "200Mi"}, + "limits": {"cpu": "500m", "memory": "1512Mi"}, + }, + "mimir_query_frontend": { + "requests": {"cpu": "100m", "memory": "128Mi"}, + "limits": {"cpu": "200m", "memory": "512Mi"}, + }, + "mimir_query_scheduler": { + "requests": {"cpu": "100m", "memory": "128Mi"}, + "limits": {"cpu": "200m", "memory": "200Mi"}, + }, + "mimir_rollout_operator": { + "requests": {"cpu": "50m", "memory": "100Mi"}, + "limits": {"cpu": "100m", "memory": "200Mi"}, + }, + "mimir_ruler": { + "requests": {"cpu": "50m", "memory": "100Mi"}, + "limits": {"cpu": "100m", "memory": "200Mi"}, + }, + "mimir_store_gateway": { + "requests": {"cpu": "100m", "memory": "100Mi"}, + "limits": {"cpu": "200m", "memory": "500Mi"}, + }, + "mimir_gateway": { + "requests": {"cpu": "100m", "memory": "100Mi"}, + "limits": {"cpu": "200m", "memory": "500Mi"}, + }, + "mimir_alertmanager": { + "requests": {"cpu": "50m", "memory": "128Mi"}, + "limits": {"cpu": "100m", "memory": "200Mi"}, + }, + "cortex_tenant": { + "requests": {"cpu": "100m", "memory": "100Mi"}, + "limits": {"cpu": "400m", "memory": "500Mi"}, + }, + } + + res = copy.deepcopy(default_resources[resource_name]) + if the.config["observability_stack_resources"]: + custom_res = the.config["observability_stack_resources"].get(resource_name, {}) + res.update(custom_res) + + return res diff --git a/scripts/pre-commit-hooks/__init__.py b/scripts/pre-commit-hooks/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/scripts/pre-commit-hooks/validate_docker_labels.py b/scripts/pre-commit-hooks/validate_docker_labels.py new file mode 100644 index 00000000..cd07e184 --- /dev/null +++ b/scripts/pre-commit-hooks/validate_docker_labels.py @@ -0,0 +1,347 @@ +import glob +import os +import re +import sys +from pathlib import Path + +from rich.console import Console +from rich.panel import Panel +from rich.table import Table + +BASE_DIR = Path(__file__).resolve().parent.parent.parent +DOCKERFILE_PATHS = [ + BASE_DIR / "src/code-server/code-server/Dockerfile", + BASE_DIR / "src/code-server/dbt-core-interface/Dockerfile", + BASE_DIR / "src/airflow/airflow/Dockerfile", + BASE_DIR / "src/ci/airflow/Dockerfile", + BASE_DIR / "src/ci/basic/Dockerfile", +] +REQUIREMENTS_COMMON_BASE = BASE_DIR / "src/common/requirements" +CODE_SERVER_EXTENSIONS_BASE = BASE_DIR / "src/code-server/code-server/profiles" +REQUIREMENTS_AIRFLOW_PROVIDERS_BASE = BASE_DIR / "src/common/providers" + + +console = Console() + + +def extract_labels_from_dockerfiles(filepath: Path, label_pattern: str): + """ + Extracts labels from a Dockerfile based on the provided label pattern. + + Args: + filepath (Path): The path to the Dockerfile. 
+ label_pattern (str): The regex pattern to match the labels. + + Returns: + dict: A dictionary where the keys are the label names and the values are their versions. + + Raises: + SystemExit: If the Dockerfile does not exist, the script exits with a status code of 1. + """ + if not filepath.exists(): + dockerfile = filepath.relative_to(BASE_DIR) + print(f"Error: {dockerfile} does not exist.") + sys.exit(1) + + labels = {} + dockerfile_content = filepath.read_text(encoding="utf-8") + + folder_name = filepath.parent.name + if "src/ci" in str(filepath.absolute()): + folder_name = f"ci-{folder_name}" + + pattern = re.compile(label_pattern) + matches = pattern.findall(dockerfile_content) + for lib, version in matches: + labels[lib] = version + + return labels + + +def extract_requirements(filepath_base: str): + """ + Extracts the list of required libraries and their versions from the requirements files. + + This function reads all files in the specified base directory, extracts the library names and versions, + and returns them in a dictionary. If any errors occur during the processing of the files, they are collected + and printed, and the script exits with a status code of 1. + + Args: + filepath_base (str): The base directory containing the requirements files. + + Returns: + dict: A dictionary where the keys are the library names and the values are their versions. + + Raises: + SystemExit: If any errors occur during the processing of the files, the script exits with a status code of 1. + """ + requirements = {} + req_errors = [] + + # Iterate over all files in the specified base directory + for filename in os.listdir(filepath_base): + filepath = os.path.join(filepath_base, filename) + + # Check if the current path is a file + if os.path.isfile(filepath): + with open(filepath, "r", encoding="utf-8") as f: + # Read each line in the file + for line in f.readlines(): + req_file = Path(filepath).relative_to(BASE_DIR) + line_sanitized = line.strip() + + # Skip lines that start with "-r" + if line.startswith("-r"): + continue + + try: + # Correctly separate the library and the version + library, version = re.split(r"==|~=|@|>=", line_sanitized) + requirements[library] = version + except ValueError: + # Collect errors for lines with incorrect format + req_errors.append( + f"Version not found or incorrect format in line: " + f"[yellow bold]{line_sanitized}[/yellow bold] " + f"in [yellow]{req_file}[/yellow]" + ) + + # If there are any errors, print them and exit with an error code + if req_errors: + errors = "\n".join(req_errors) + panel = Panel(errors, style="red") + console.print(panel, end=" ") + sys.exit(1) + + return requirements + + +def dockerfile_validate_libraries(): + """ + Validates that the labels in Dockerfile have the correct format and match the requirements.txt. + This function performs the following steps: + 1. Extracts the list of required libraries and their versions from the requirements file. + 2. Iterates over the specified Dockerfile paths. + 3. For each Dockerfile, constructs a label pattern based on the folder name. + 4. Extracts labels from the Dockerfile using the constructed pattern. + 5. Compares the extracted labels with the required libraries and their versions. + 6. Collects any discrepancies, such as missing libraries or version mismatches. + 7. If discrepancies are found, prints a table of mismatches and exits with an error code. + Returns: + None + Raises: + SystemExit: If any discrepancies are found between the Dockerfile labels and the requirements. 
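+
+    Example:
+        A label matched by this validation is expected to look roughly like the
+        following (library name and version here are hypothetical):
+
+            LABEL com.datacoves.library.airflow.dbt-core=1.7.4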
+ """ + """Validates that the labels in Dockerfile have the correct format and match the requirements.txt.""" + + libraries_with_error = [] + requirements = extract_requirements(filepath_base=REQUIREMENTS_COMMON_BASE) + + for filepath in DOCKERFILE_PATHS: + folder_name = filepath.parent.name + if "src/ci" in str(filepath.absolute()): + folder_name = f"ci-{folder_name}" + + label_pattern = ( + rf"com\.datacoves\.library\.{folder_name}\.(\S+?)=([\w\.\-\[\]+]+)" + ) + docker_labels = extract_labels_from_dockerfiles( + filepath=filepath, label_pattern=label_pattern + ) + dockerfile = filepath.relative_to(BASE_DIR) + + for lib, req_version in requirements.items(): + if lib not in docker_labels: + print(docker_labels) + print(lib, req_version) + libraries_with_error.append( + [str(dockerfile), lib, req_version, "Missing"] + ) + + else: + docker_version = docker_labels[lib] # Get the version from requirements + if req_version != docker_version: + libraries_with_error.append( + [str(dockerfile), lib, req_version, docker_version] + ) + + if len(libraries_with_error) > 0: + table = Table( + "Dockerfile", + "Library", + "Req version", + title="Dockerfile Libraries Missmatching", + ) + table.add_column("Docker version", style="red") + for dockerfile, library, req_version, docker_version in libraries_with_error: + table.add_row(dockerfile, library, req_version, docker_version) + + console.print(table) + sys.exit(1) + + +def extract_code_server_extensions(): + """ + Extracts and returns a dictionary of code-server extensions and their versions. + + This function searches for all `.vsix` files within the `CODE_SERVER_EXTENSIONS_BASE` directory + and its subdirectories. It extracts the version from the filename and maps the extension name + to its version in a dictionary. If any errors occur during the processing of the files, they + are collected and printed, and the script exits with a status code of 1. + + Returns: + dict: A dictionary where the keys are the extension names and the values are their versions. + + Raises: + SystemExit: If any errors occur during the processing of the files, the script exits with a status code of 1. + """ + + def extract_code_server_extension_version(filename: str): + """Extracts the version from the filename.""" + match = re.search(r"[\-\.]([\d\.]+)(?=\.[a-z]+$|$)", filename) + if match: + return match.group(1) + return None + + extensions = {} + extensions_errors = [] + + # Use glob for more concise and efficient file searching + for vsix_file in glob.glob( + f"{CODE_SERVER_EXTENSIONS_BASE}/**/**/*.vsix" + ): # Recursive search + try: + filename = os.path.basename(vsix_file).replace(".vsix", "") + version = extract_code_server_extension_version(filename) + # Remove version and any trailing chars + name = ( + filename.replace("-" + version, "") + .replace("." 
+ version, "") + .replace("v" + version, "") + .strip("_.") + ) + extensions[name] = version + + except Exception as e: + extensions_errors.append(f"Error processing {vsix_file}: {e}") + + if extensions_errors: + # Print errors with a clear indicator + errors = "\n".join(extensions_errors) + panel = Panel(errors, style="red") + console.print(panel, end=" ") + sys.exit(1) + + return extensions + + +def dockerfile_validate_extensions(): + """Validates that the extensions in Dockerfile have the correct format and match the extracted extensions.""" + + # Define the pattern to match the extension labels in the Dockerfile + extension_pattern = r"com\.datacoves\.extension\.code-server\.([\w\.-]+)=([\w\.-]+)" + filepath = DOCKERFILE_PATHS[0] + dockerfile = filepath.relative_to(BASE_DIR) + + # Extract the labels from the Dockerfile + docker_labels = extract_labels_from_dockerfiles( + filepath=filepath, label_pattern=extension_pattern + ) + + # Extract the code-server extensions + code_server_extensions = extract_code_server_extensions() + + extensions_with_error = [] + + # Compare the extracted labels with the code-server extensions + for ext_name_label, ext_label_version in docker_labels.items(): + if ext_name_label in code_server_extensions: + ext_version = code_server_extensions[ + ext_name_label + ] # Get the version from requirements + if ext_version != ext_label_version: + extensions_with_error.append( + [str(dockerfile), ext_name_label, ext_version, ext_label_version] + ) + + # If there are any mismatches, print them in a table and exit with an error + if len(extensions_with_error) > 0: + table = Table( + "Dockerfile", + "Extension", + "Ext version", + title="Dockerfile Extensions Missmatching", + ) + table.add_column("Docker version", style="red") + for dockerfile, ext_name, ext_version, docker_version in extensions_with_error: + table.add_row(dockerfile, ext_name, ext_version, docker_version) + + console.print(table) + sys.exit(1) + + +def docker_validate_airflow_providers(): + """Validates that the Airflow providers in Dockerfile have the correct format and match the requirements.""" + + # Define the pattern to match the provider labels in the Dockerfile + provider_pattern = r"com\.datacoves\.provider\.airflow\.(\S+?)=([\w\.\-\[\]+]+)" + filepath = DOCKERFILE_PATHS[2] + dockerfile = filepath.relative_to(BASE_DIR) + + # Extract the labels from the Dockerfile + docker_labels = extract_labels_from_dockerfiles( + filepath=filepath, label_pattern=provider_pattern + ) + + # Extract the requirements for Airflow providers + requirements = extract_requirements( + filepath_base=REQUIREMENTS_AIRFLOW_PROVIDERS_BASE + ) + + providers_with_error = [] + + # Check for missing providers in the Dockerfile + for provider, provider_version in requirements.items(): + if provider not in docker_labels: + providers_with_error.append( + f"LABEL provider for [bold]{provider}[/bold] not found in {dockerfile}" + ) + + if providers_with_error: + # Print errors with a clear indicator + errors = "\n".join(providers_with_error) + panel = Panel(errors, style="red") + console.print(panel, end=" ") + sys.exit(1) + + providers_with_error = [] + + # Compare the extracted labels with the requirements + for provider, provider_version in requirements.items(): + provider_docker_version = docker_labels[ + provider + ] # Get the version from requirements + if provider_version != provider_docker_version: + providers_with_error.append( + [provider, provider_version, provider_docker_version] + ) + + # If there are any mismatches, 
print them in a table and exit with an error + if len(providers_with_error) > 0: + table = Table( + "Provider", + "Prov version", + title=f"Airflow Providers Missmatching: {dockerfile}", + ) + table.add_column("Docker version", style="red") + for provider, prov_version, docker_version in providers_with_error: + table.add_row(provider, prov_version, docker_version) + + console.print(table) + sys.exit(1) + + +if __name__ == "__main__": + dockerfile_validate_libraries() + dockerfile_validate_extensions() + docker_validate_airflow_providers() diff --git a/scripts/releases.py b/scripts/releases.py new file mode 100644 index 00000000..cfb1418e --- /dev/null +++ b/scripts/releases.py @@ -0,0 +1,87 @@ +from datetime import datetime, timezone +from os import listdir +from pathlib import Path + +from lib.config.config import load_envs +from lib.config_files import load_yaml +from scripts import setup_secrets + +from .github import Releaser + +legacy_config_cluster_domains = [ + "app.datacoves.com", + "beta.datacoves.com", + "ensembledev.apps.jnj.com", +] + + +def generate_release_name(ticket_check: bool = True): + """ + Returns generated release name and timestamp + + Setting ticket_check to False will skip the check to see if we're + using a ticket. + """ + release = None + + if ticket_check: + ticket = setup_secrets.get_ticket_number_by_git_branch(prompt=False) + else: + ticket = False + + now = datetime.now(timezone.utc) + + if ticket: + release = f"pre-{ticket}" + else: + version = str(load_yaml(".version.yml")["version"]) + release = version + "." + now.strftime("%Y%m%d%H%M") + + return release, now.isoformat(), ticket + + +def active_releases(cluster_domain_suffix_mask=""): + """Return the list of releases that are active: referenced by some + environment in some cluster matching the cluster_domain_suffix_mask.""" + cluster_domains = [ + p.name + for p in Path("config").glob(f"*{cluster_domain_suffix_mask}") + if p.name not in legacy_config_cluster_domains + ] + return [ + env["release"] + for cluster_domain in cluster_domains + for env in load_envs(cluster_domain).values() + ] + + +def all_releases(): + return [ + release_path.name[: -len(".yaml")] + for release_path in sorted(Path("releases").glob("*.yaml")) + ] + + +def get_latest_release(): + """Get the newest x.x.xxxxxx.yml file from releases/ folder""" + releases_path = "./releases" + Releaser().download_releases() + + releases = [ + f.replace(".yaml", "") + for f in sorted(listdir(releases_path)) + if Path(f"{releases_path}/{f}").is_file() + ] + + try: + ticket = setup_secrets.get_ticket_number_by_git_branch(prompt=False) + except Exception: + # This causes a problem for CI. 
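+        # In CI there is typically no ticket branch, so fall back to the newest
+        # non-"pre" release returned at the end of this function.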
+ ticket = None + + if ticket: + releases_drafts = list(filter(lambda x: x == f"pre-{ticket}", releases)) + if releases_drafts: + return releases_drafts[0] + + return list(filter(lambda x: "pre" not in x, releases))[-1] diff --git a/scripts/setup_admission_controller.py b/scripts/setup_admission_controller.py new file mode 100644 index 00000000..9b10f4fe --- /dev/null +++ b/scripts/setup_admission_controller.py @@ -0,0 +1,14 @@ +from lib.config import config as the +from scripts import k8s_utils + + +def setup_admission_controller(cluster_domain): + params_yaml_path = f"config/{cluster_domain}/cluster-params.yaml" + the.load_cluster_params(params_yaml_path) + if the.config["block_workers"]: + helm_chart_path = "./src/core/admission-controller/charts/admission-controller" + k8s_utils.helm( + f"upgrade --install admission-controller --namespace core {helm_chart_path}" + ) + else: + print("Admission Controller not installed due to cluster params configuration.") diff --git a/scripts/setup_base.py b/scripts/setup_base.py new file mode 100644 index 00000000..aec9d305 --- /dev/null +++ b/scripts/setup_base.py @@ -0,0 +1,255 @@ +import json +import sys +from pathlib import Path + +import questionary +from questionary.prompts.common import Choice + +from lib import cmd +from lib.config import config as the +from lib.config_files import write_yaml +from scripts import console +from scripts.k8s_utils import create_namespace, helm, kubectl, wait_for_deployment + + +def setup_base(cluster_domain): + """Setup the base cluster level dependencies.""" + check_scripts_dependencies() + + base_dir = Path(f"config/{cluster_domain}/base") + Path(".generated").mkdir(parents=True, exist_ok=True) + + # If the config base dir is a kustomize directory, run it. + if (base_dir / "kustomization.yaml").exists(): + console.print_title("Running kustomize") + kubectl(f"apply -k {base_dir}") + + # TODO: Instead of selecting prereqs to install based on the domain, start querying the cluster + # to understand what's missing, i.e. if ingress-nginx namespace is not present, then install it. + + # Run domain specific setup. + setupers = [ + ("datacoveslocal.com", setup_base_localhost), + ("east-us-a.datacoves.com", setup_base_aks), + (".orrum.com", setup_base_aks), + (".ccsperfusion.com", setup_base_aks), + (".jnj.com", setup_base_jnj), + ("chap.datacoves.com", setup_base_kenvue), + ("datacoves.kenvue.com", setup_base_kenvue), + ] + for domain, setup in setupers: + if cluster_domain.endswith(domain): + return setup(cluster_domain, base_dir) + + +# return setup_base_automatic(cluster_domain, base_dir) + + +def setup_base_localhost(cluster_domain, base_dir): + kubectl("-n kube-system set env daemonset/calico-node FELIX_IGNORELOOSERPF=true") + kubectl("label --overwrite namespace/kube-system networking/namespace=kube-system") + setup_nfs() + + +def setup_base_aks(cluster_domain, base_dir): + kubectl("label --overwrite namespace/kube-system networking/namespace=kube-system") + + +def setup_base_automatic(cluster_domain, base_dir): + kubectl("label --overwrite namespace/kube-system networking/namespace=kube-system") + + helm_repo = "https://kubernetes.github.io" + + console.print_title("Installing ingress-nginx") + create_namespace("ingress-nginx") + helm( + f"-n ingress-nginx upgrade --install ingress-nginx --repo {helm_repo}/ingress-nginx --version 4.8.3" + ) + + +def setup_base_jnj(cluster_domain, base_dir): + # Add docker credentials to connect to artifactory. 
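+    # The secret manifest is expected to live at
+    # config/<cluster_domain>/base/docker-config.secret.yaml (the base_dir
+    # argument above).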
+ kubectl(f"apply -f {base_dir}/docker-config.secret.yaml") + + selected = questionary.checkbox( + "Uncheck the tasks you want to skip", + choices=[ + Choice("Install ingress-nginx", value="ingress", checked=True), + Choice("Install EFS CSI driver", value="efs", checked=True), + Choice("Install metrics server", value="metrics", checked=True), + ], + ).ask() + + if not selected: + print("No helm chart to install.") + return + + # Add jnj helm charts. + username = questionary.text("jnj-helm-charts username:").ask() + password = questionary.password("jnj-helm-charts password:").ask() + helm_repo = "https://artifactrepo.jnj.com/artifactory/jnj-helm-charts" + helm( + f"repo add jnj-helm-charts {helm_repo}", + "--username", + username, + "--password", + password, + "--force-update", + ) + helm("repo update jnj-helm-charts") + + if "ingress" in selected: + # Install ingress-nginx from jnj helm chart. + console.print_title("Installing ingress-nginx") + create_namespace("ingress-nginx") + helm( + "-n ingress-nginx upgrade --install ingress-nginx jnj-helm-charts/ingress-nginx --version 4.11.5", + "-f", + f"{base_dir}/ingress-nginx-common-values.yaml", + "-f", + f"{base_dir}/ingress-nginx-setup-values.yaml", + "-f", + f"{base_dir}/ingress-nginx-internal-values.yaml", + "--atomic", + ) + + if "efs" in selected: + if questionary.confirm( + "Have you opted out from CloudX managed EFS CSI driver?" + ).ask(): + # Install EFS-CSI from jnj helm chart. + console.print_title("Installing EFS CSI driver") + helm( + "-n kube-system upgrade --install efs-csi-driver jnj-helm-charts/aws-efs-csi-driver --version 2.5.7", + "-f", + f"{base_dir}/efs-csi-common-values.yaml", + "--atomic", + ) + else: + print( + "Could'nt install EFS CSI Driver if cluster was not opted out from CloudX first." + ) + + if "metrics" in selected: + # Install metrics server to run "kubectl top" commands + console.print_title("Installing Metrics Server") + helm( + "-n kube-system upgrade --install metrics-server jnj-helm-charts/metrics-server", + "-f", + f"{base_dir}/metrics-server-values.yaml", + "--atomic", + ) + + +def setup_base_kenvue(cluster_domain, base_dir): + # Add docker credentials to connect to artifactory. + kubectl(f"apply -f {base_dir}/docker-config.secret.yaml") + + selected = questionary.checkbox( + "Uncheck the tasks you want to skip", + choices=[ + Choice("Install ingress-nginx", value="ingress", checked=True), + Choice("Install EFS CSI driver", value="efs", checked=True), + Choice("Install metrics server", value="metrics", checked=True), + ], + ).ask() + + if not selected: + print("No helm chart to install.") + return + + helm_repo = "oci://kenvue.jfrog.io/dco-helm" + + if "ingress" in selected: + # Install ingress-nginx from jnj helm chart. + console.print_title("Installing ingress-nginx") + create_namespace("ingress-nginx") + helm( + f"-n ingress-nginx upgrade --install ingress-nginx {helm_repo}/ingress-nginx --version 4.8.3", + "-f", + f"{base_dir}/ingress-nginx-common-values.yaml", + "-f", + f"{base_dir}/ingress-nginx-setup-values.yaml", + "-f", + f"{base_dir}/ingress-nginx-internal-values.yaml", + "--atomic", + ) + + if "efs" in selected: + if questionary.confirm( + "Have you opted out from CloudX managed EFS CSI driver?" + ).ask(): + # Install EFS-CSI from jnj helm chart. 
+            console.print_title("Installing EFS CSI driver")
+            helm(
+                f"-n kube-system upgrade --install efs-csi-driver {helm_repo}/aws-efs-csi-driver --version 3.0.1",
+                "-f",
+                f"{base_dir}/efs-csi-common-values.yaml",
+                "--atomic",
+            )
+        else:
+            print(
+                "Can't install the EFS CSI driver: the cluster must be opted out from the CloudX managed driver first."
+            )
+
+    if "metrics" in selected:
+        # Install metrics server to run "kubectl top" commands
+        console.print_title("Installing Metrics Server")
+        helm(
+            f"-n kube-system upgrade --install metrics-server {helm_repo}/metrics-server --version 3.12.2",
+            "-f",
+            f"{base_dir}/metrics-server-values.yaml",
+            "--atomic",
+        )
+
+
+def wait_for_base(cluster_domain):
+    """Wait until the base cluster level dependencies are running."""
+    check_scripts_dependencies()
+    wait_for_nginx()
+
+
+def wait_for_nginx():
+    wait_for_deployment("ingress-nginx", "ingress-nginx-controller")
+
+
+def check_scripts_dependencies():
+    check_kubectl_version()
+
+
+def check_kubectl_version():
+    """Check kubectl has a client version we can use"""
+    v = json.loads(cmd.output("kubectl version --client -o json"))["clientVersion"]
+    if v["major"] != "1" or int(v["minor"]) < 21:
+        print("kubectl version --client must be 1.x with x >= 21", file=sys.stderr)
+        print("download the latest binary with:", file=sys.stderr)
+        print(
+            '  $ curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"',  # noqa: E501
+            file=sys.stderr,
+        )
+        raise RuntimeError("unsupported kubectl client version")
+
+
+def uninstall_nfs():
+    helm("-n local-path-storage uninstall nfs-server-provisioner --ignore-not-found")
+
+
+def setup_nfs():
+    """create an NFS server to simulate filesystems like EFS and AFS"""
+    helm(
+        "repo add nfs-ganesha-server-and-external-provisioner "
+        "https://kubernetes-sigs.github.io/nfs-ganesha-server-and-external-provisioner/"
+    )
+    helm("repo update nfs-ganesha-server-and-external-provisioner")
+    nfs_server_values = ".generated/nfs-server-values.yaml"
+    data = {
+        "nodeSelector": the.VOLUMED_NODE_SELECTOR,
+        "persistence": {"enabled": "false", "storageClass": "standard", "size": "10Gi"},
+    }
+    write_yaml(nfs_server_values, data)
+    helm(
+        "-n local-path-storage install nfs-server-provisioner "
+        "nfs-ganesha-server-and-external-provisioner/nfs-server-provisioner ",
+        "-f",
+        nfs_server_values,
+    )
diff --git a/scripts/setup_core.py b/scripts/setup_core.py
new file mode 100755
index 00000000..7f1057fe
--- /dev/null
+++ b/scripts/setup_core.py
@@ -0,0 +1,1943 @@
+import glob
+import json
+import os
+import re
+from datetime import datetime
+from enum import Enum, auto
+from pathlib import Path
+
+import questionary
+from questionary.prompts.common import Choice
+
+from lib import cmd
+from lib.config import config as the
+from lib.config.config import load_envs
+from lib.config_files import emit_yamls, load_text_file, mkdir, write_yaml
+from lib.tools import parse_image_uri
+from scripts import console, helm_utils, k8s_utils, setup_base, volumes
+from scripts.k8s_utils import helm, kubectl, kubectl_output
+
+CONFIG_DIR = "/tmp/config"
+NAMESPACE_CORE = "core"
+NAMESPACE_OBSERVABILITY = "prometheus"
+CORE_OUTDIR = None
+
+
+class DatacovesCoreK8sName(Enum):
+    API = "api"
+    WORKER_MAIN = "worker-main"
+    WORKER_LONG = "worker-long"
+    BEAT = "beat"
+    FLOWER = "flower"
+    DBT_API = "dbt-api"
+    WORKBENCH = "workbench"
+    STATIC_PAGES = "static-pages"
+    REDIS = "redis-master"
+    POSTGRES = "postgres-postgresql"
+    K8S_MONITOR = "k8s-monitor"
+
+
+class WaitFor(Enum):
+    DATABASE = auto()
+    REDIS = auto()
+
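For reference, `check_kubectl_version` above only inspects the `clientVersion` block of `kubectl version --client -o json`. A self-contained illustration of the same check against a canned payload:

```python
# Illustration of the version check in check_kubectl_version, run against a
# canned kubectl JSON payload instead of a live binary.
import json

sample = '{"clientVersion": {"major": "1", "minor": "27", "gitVersion": "v1.27.4"}}'
v = json.loads(sample)["clientVersion"]

supported = v["major"] == "1" and int(v["minor"]) >= 21
print(f"kubectl client {v['gitVersion']} supported: {supported}")
```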
DJANGO_MODEL = auto() + + +def load_config(cluster_domain: str): + global CORE_OUTDIR + params_yaml_path = f"config/{cluster_domain}/cluster-params.yaml" + the.load_cluster_params(params_yaml_path) + CORE_OUTDIR = the.CORE_DIR / the.cluster_domain + mkdir(the.OUTPUT_DIR) + mkdir(the.CORE_DIR) + mkdir(CORE_OUTDIR) + + +def setup_core(cluster_domain: str): + load_config(cluster_domain) + if the.config["install_node_local_dns"]: + # Install node local DNS + print("Installing node local dns...") + setup_node_local_dns(cluster_domain) + + else: + print("Did not install node local dns") + + api_pod = k8s_utils.pod_for_deployment( + ns=NAMESPACE_CORE, deployment=DatacovesCoreK8sName.API.value + ) + if api_pod: + try: + # Creates a cluster upgrade record if core api is running already + run_in_api_pod = k8s_utils.cmd_runner_in_pod( + NAMESPACE_CORE, api_pod, container=DatacovesCoreK8sName.API.value + ) + author = cmd.output("whoami").replace("\n", "") + run_in_api_pod( + f'./manage.py upgrade_cluster --release-name {the.config["release"]} --triggered-by "{author}"' + ) + except Exception: + # This could fail for many reasons, and we don't need to register an upgrade in that case + pass + else: + print("\nWARNING: Api pod to setup_core not found.") + + setup_core_k8s(cluster_domain) + setup_core_data(cluster_domain) + + +def _is_running_ci() -> bool: + ci_running = os.getenv("CI_RUNNING", "false").lower() in ( + "yes", + "y", + "true", + "t", + "1", + ) + + return ci_running + + +def setup_node_local_dns(cluster_domain: str): + """Set up the node local DNS via helm chart""" + + # Default configuration + # Note that this isn't deep-updated so if you alter 'config' + # be careful, though I think normally we will be adding a 'resources' + # section which is more the planned intent here. + node_dns_config = { + "config": { + # This should be our cluster DNS IP for CoreDNS + "dnsServer": "10.96.0.10", + }, + } + + # Allow cluster-config.yaml to override it. 
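`setup_core` above drives Django management commands through `k8s_utils.cmd_runner_in_pod`, another helper that is not part of this diff. A rough sketch of what it presumably does, a closure around `kubectl exec`; the name and signature are taken from the call sites, everything else is assumed:

```python
# Hypothetical sketch of k8s_utils.cmd_runner_in_pod based on how it is called
# in setup_core.py; the real implementation is not shown in this diff.
import shlex
import subprocess


def cmd_runner_in_pod(namespace: str, pod: str, container: str):
    """Return a callable that runs a command inside the given pod/container."""

    def run(command, capture_output: bool = False):
        argv = command if isinstance(command, list) else shlex.split(command)
        return subprocess.run(
            ["kubectl", "-n", namespace, "exec", pod, "-c", container, "--", *argv],
            check=True,
            capture_output=capture_output,
        )

    return run


# Usage mirroring setup_core() (pod name is illustrative):
# run_in_api_pod = cmd_runner_in_pod("core", "core-api-abc123", container="api")
# run_in_api_pod("./manage.py migrate")
```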
+ # https://github.com/deliveryhero/helm-charts/tree/master/stable/node-local-dns + node_dns_config.update(the.config.get("node_dns_config", {})) + + # Write config file + write_yaml(".generated/node-local-dns-values.yaml", node_dns_config) + + # Add the helm repo + helm("repo add deliveryhero https://charts.deliveryhero.io/") + + # Run the helm chart + helm( + "upgrade --install node-local-dns deliveryhero/node-local-dns " + "--version 2.0.14 -f .generated/node-local-dns-values.yaml" + ) + + +def setup_core_k8s(cluster_domain): + setup_base.wait_for_base(cluster_domain) + gen_core() + + # Creates the namespace + kubectl(f"apply -f {CORE_OUTDIR}/core.yaml") + + if the.config["run_core_api_db_in_cluster"]: + setup_postgres() + + if the.config["local_dbt_api_minio"]: + setup_minio() + + setup_redis() + kubectl(f"apply -k {CORE_OUTDIR}") + + +def setup_maintenance_page( + cluster_domain, + on_maintenance=False, + restore_time=None, + contact_email=None, + contact_name=None, +): + load_config(cluster_domain) + gen_maintenance_page( + on_maintenance=on_maintenance, + restore_time=restore_time, + contact_email=contact_email, + contact_name=contact_name, + ) + + global CORE_OUTDIR + kubectl(f"apply -k {CORE_OUTDIR}") + deployment_name = DatacovesCoreK8sName.STATIC_PAGES.value + kubectl(f"-n {NAMESPACE_CORE} scale deployment {deployment_name} --replicas=0") + kubectl(f"-n {NAMESPACE_CORE} scale deployment {deployment_name} --replicas=1") + + +def get_api_pod(): + if the.config["run_core_api_db_in_cluster"]: + k8s_utils.wait_for_statefulset( + NAMESPACE_CORE, DatacovesCoreK8sName.POSTGRES.value + ) + + k8s_utils.wait_for_deployment(NAMESPACE_CORE, DatacovesCoreK8sName.API.value) + api_pod = k8s_utils.pod_for_deployment( + NAMESPACE_CORE, DatacovesCoreK8sName.API.value + ) + print(f"api pod: {api_pod}") + return api_pod + + +def setup_core_data(cluster_domain, api_pod=None): + k8s_utils.wait_for_deployment( + NAMESPACE_CORE, DatacovesCoreK8sName.WORKER_MAIN.value + ) + k8s_utils.wait_for_deployment( + NAMESPACE_CORE, DatacovesCoreK8sName.WORKER_LONG.value + ) + k8s_utils.wait_for_statefulset(NAMESPACE_CORE, DatacovesCoreK8sName.REDIS.value) + api_pod = api_pod or get_api_pod() + run_in_api_pod = k8s_utils.cmd_runner_in_pod( + NAMESPACE_CORE, api_pod, container=DatacovesCoreK8sName.API.value + ) + + run_in_api_pod("./manage.py migrate") + + envs = load_envs(cluster_domain) + load_core_config(cluster_domain, envs, api_pod) + register_environments(envs, api_pod) + + # Remove deprecated + if "deprecated" in the.release: + if "deployments" in the.release["deprecated"]: + delete_deprecated_deployments(the.release["deprecated"]["deployments"]) + + if "charts" in the.release["deprecated"]: + delete_deprecated_helm_charts(the.release["deprecated"]["charts"]) + + if "hpas" in the.release["deprecated"]: + delete_deprecated_hpas(the.release["deprecated"]["hpas"]) + + +def load_core_config(cluster_domain, envs, api_pod=None): + api_pod = api_pod or get_api_pod() + run_in_api_pod = k8s_utils.cmd_runner_in_pod( + NAMESPACE_CORE, api_pod, container=DatacovesCoreK8sName.API.value + ) + create_default_user = the.cluster_is_localhost() and not _is_running_ci() + + # Copy configs to core api pod. 
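One caveat with the node-local-dns defaults built in `setup_node_local_dns` above: the `node_dns_config.update(...)` call is a shallow merge, so an override that redefines `config` replaces the whole default block (dropping `dnsServer`) rather than extending it. A small illustration:

```python
# Shallow dict.update: overriding "config" replaces the entire nested block.
defaults = {"config": {"dnsServer": "10.96.0.10"}}

override = {"config": {"resources": {"requests": {"cpu": "25m"}}}}
merged = dict(defaults)
merged.update(override)

print(merged)
# {'config': {'resources': {'requests': {'cpu': '25m'}}}}  <- dnsServer is gone
```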
+ roots = [ + "cluster-params.yaml", + "cluster-params.secret.yaml", + "pricing.yaml", + "environments", + ] + run_in_api_pod(f"rm -rf {CONFIG_DIR}") + run_in_api_pod(f"mkdir -p {CONFIG_DIR}") + for root in roots: + path = Path(f"config/{cluster_domain}/{root}") + if path.exists(): + kubectl(f"-n core cp {path} {api_pod}:/tmp/config/{root}") + + # Load releases + releases_dir = copy_releases(api_pod, run_in_api_pod) + current_release = the.config["release"] + run_in_api_pod( + f"./manage.py load_releases --releases {releases_dir} --current-release {current_release}" + ) + + # Register cluster. + env_slugs = ",".join(envs.keys()) + if env_slugs: + env_slugs = " --envs " + env_slugs + + req_user_confirm = not _is_running_ci() + + run_in_api_pod( + f"./manage.py register_cluster --config {CONFIG_DIR}{env_slugs} " + f"--create-default-user {create_default_user} --user-confirm " + f"{req_user_confirm}" + ) + + sa_cmd = run_in_api_pod( + "./manage.py generate_service_account --email-sa api-core-sa", + capture_output=True, + ) + + try: + # It will be the last line of output + json_data = sa_cmd.stdout.strip().split(b"\n")[-1] + + sa_data = json.loads(json_data) + + if "username" not in sa_data: # We loaded something, but it is wrong + raise RuntimeError(f"Loaded wrong data from: {sa_cmd.stdout}") + + except json.decoder.JSONDecodeError as e: + print("Got an error while processing the following output:", e) + print(sa_cmd.stdout) + raise + + gen_core_api_service_account_k8s_secret(data=sa_data) + + +def gen_core_api_service_account_k8s_secret(data: dict): + if data: + secret_name = "api-core-service-account" + username = data["username"] + password = data["password"] + token = data["token"] + + try: + kubectl(f" -n {NAMESPACE_CORE} delete secret {secret_name}") + except Exception: + pass + + cmd = ( + f" -n {NAMESPACE_CORE} create secret generic " + f"{secret_name} " + f"--from-literal=username={username} " + f"--from-literal=password={password} " + f"--from-literal=token={token}" + ) + kubectl(cmd) + + +def copy_releases(api_pod, run_in_api_pod): + """Load releases from the releases directory on the api pod""" + releases_dir = "/tmp/releases" + run_in_api_pod(f"rm -rf {releases_dir}") + kubectl(f"-n core cp releases {api_pod}:{releases_dir}") + return releases_dir + + +def copy_test_secrets(api_pod, run_in_api_pod): + """Load releases from the releases directory on the api pod""" + test_secret_file = "/tmp/integration_tests.yaml" + run_in_api_pod(f"rm -f {test_secret_file}") + kubectl(f"-n core cp secrets/integration_tests.yaml {api_pod}:{test_secret_file}") + return test_secret_file + + +def register_environments(envs, api_pod=None): + api_pod = api_pod or get_api_pod() + run_in_api_pod = k8s_utils.cmd_runner_in_pod( + NAMESPACE_CORE, api_pod, container=DatacovesCoreK8sName.API.value + ) + req_user_confirm = not _is_running_ci() + for env in envs.keys(): + run_in_api_pod( + f"./manage.py register_environment --config {CONFIG_DIR} --env {env} --user-confirm {req_user_confirm}" + ) + + +def gen_core(): + files = { + "core.yaml": gen_core_base(), + "k8s-monitor.yaml": gen_k8s_monitor(), + "core-api.yaml": gen_core_api(), + "core-workbench.yaml": gen_core_workbench(), + "core-worker-main.yaml": gen_core_worker( + DatacovesCoreK8sName.WORKER_MAIN, "api-main" + ), + "core-worker-long.yaml": gen_core_worker( + DatacovesCoreK8sName.WORKER_LONG, "api-long" + ), + "core-flower.yaml": gen_core_flower(), + "core-beat.yaml": gen_core_beat(), + "core-static-pages-configmap.yaml": 
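`load_core_config` above extracts the service-account credentials by taking the last line of the management command's output and parsing it as JSON. A standalone illustration of that parsing step, using fabricated stdout bytes:

```python
# Parse the last line of a command's stdout as JSON, as load_core_config does
# with the generate_service_account output. The stdout value here is made up.
import json

fake_stdout = (
    b"Creating service account...\nDone.\n"
    b'{"username": "api-core-sa", "password": "not-a-real-secret", "token": "abc123"}'
)

json_data = fake_stdout.strip().split(b"\n")[-1]
sa_data = json.loads(json_data)

if "username" not in sa_data:
    raise RuntimeError(f"Loaded wrong data from: {fake_stdout}")

print(sa_data["username"])  # api-core-sa
```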
gen_core_static_pages_configmap(), + "core-static-pages.yaml": gen_core_static_pages(), + "ingress.yaml": gen_ingress(), + "workspace_editor_role.yaml": gen_workspace_editor_role(), + "role_binding.yaml": gen_role_binding(), + } + + if the.config["enable_dbt_api"]: + files.update({"core-dbt-api.yaml": gen_core_dbt_api()}) + + files.update( + { + "kustomization.yaml": gen_kustomization(resources=[*files]), + "core-api.env": load_text_file(the.SECRETS_DIR / "core-api.env"), + } + ) + + # We need to add the core-dbt-api.env file after generating the kustomization, + # otherwise it gets wrongfully included as "resource" in the kustomization.yaml + # file and kubectl can't apply the changes. + if the.config["enable_dbt_api"]: + files.update( + { + "core-dbt-api.env": load_text_file( + the.SECRETS_DIR / "core-dbt-api.env" + ), + } + ) + + if the.config["generate_docker_secret"]: + files["docker-config.secret.json"] = load_text_file( + the.SECRETS_DIR / "docker-config.secret.json" + ) + + global CORE_OUTDIR + emit_yamls(CORE_OUTDIR, files) + + +def gen_maintenance_page( + on_maintenance=False, + restore_time=None, + contact_email=None, + contact_name=None, +): + data = { + "RESTORE_TIME": restore_time, + "CONTACT_EMAIL": contact_email, + "CONTACT_NAME": contact_name, + } + files = { + "core-static-pages-configmap.yaml": gen_core_static_pages_configmap(data=data), + "ingress.yaml": gen_ingress(on_maintenance=on_maintenance), + } + files.update( + { + "kustomization.yaml": gen_kustomization(resources=[*files]), + } + ) + + if the.config["generate_docker_secret"]: + files["docker-config.secret.json"] = load_text_file( + the.SECRETS_DIR / "docker-config.secret.json" + ) + + global CORE_OUTDIR + emit_yamls(CORE_OUTDIR, files) + + +def run_integration_tests(cluster_domain, single_test=""): + """ + Run the integration tests for the core cluster. + """ + + setup_base.wait_for_base(cluster_domain) + params_yaml_path = f"config/{cluster_domain}/cluster-params.yaml" + the.load_cluster_params(params_yaml_path) + api_pod = get_api_pod() + run_in_api_pod = k8s_utils.cmd_runner_in_pod( + NAMESPACE_CORE, api_pod, container=DatacovesCoreK8sName.API.value + ) + copy_releases(api_pod, run_in_api_pod) + copy_test_secrets(api_pod, run_in_api_pod) + timestamp = datetime.now().strftime("%Y%m%d_%H%M%S") + report_name = f"report_integration_test_{timestamp}.html " + pytest_folder = the.DATACOVES_DIR / "src/core/api/app/integration_tests" + + if single_test: + pytest_single_file = pytest_folder / single_test + files = [pytest_single_file] + else: + files = glob.glob(f"{pytest_folder}/**/test*.py", recursive=True) + + # Workaround, when we will be able to run all test together we can update this code. + files.sort() + for file in files: + file_path = Path(file) + pytest_file = file_path.relative_to(pytest_folder) + name = pytest_file.with_suffix("") + output_dir = f"integration_tests/output/{name}" + + pytest_cmd = ( + f"pytest integration_tests/{pytest_file} " + "--browser firefox " + "--template=html1/index.html " + f"--output {output_dir} " + "--screenshot only-on-failure " + f"--report={output_dir}/report/{report_name}" + ) + run_in_api_pod(["su", "abc", "-c", pytest_cmd]) + + +def run_unit_tests(cluster_domain): + """ + Run the inut tests for the core cluster. 
+ """ + setup_base.wait_for_base(cluster_domain) + params_yaml_path = f"config/{cluster_domain}/cluster-params.yaml" + the.load_cluster_params(params_yaml_path) + api_pod = get_api_pod() + run_in_api_pod = k8s_utils.cmd_runner_in_pod( + NAMESPACE_CORE, api_pod, container=DatacovesCoreK8sName.API.value + ) + run_in_api_pod("./manage.py test") + + +def gen_core_base(): + # Core namespace setup. + return [ + { + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": NAMESPACE_CORE, + "labels": { + "k8s.datacoves.com/namespace": NAMESPACE_CORE, + "k8s.datacoves.com/release": the.config["release"], + }, + }, + }, + { + "apiVersion": "v1", + "kind": "ServiceAccount", + "metadata": { + "name": DatacovesCoreK8sName.API.value, + "namespace": NAMESPACE_CORE, + }, + "imagePullSecrets": [{"name": the.config["docker_config_secret_name"]}], + }, + { + "apiVersion": "v1", + "kind": "ServiceAccount", + "metadata": {"name": NAMESPACE_CORE, "namespace": NAMESPACE_CORE}, + "imagePullSecrets": [{"name": the.config["docker_config_secret_name"]}], + }, + ] + + +def gen_ingress(on_maintenance=False): + cert_manager_issuer = the.config.get("cert_manager_issuer") + main_service = "core-static-pages-svc" if on_maintenance else "core-workbench-svc" + + rules = [ + { + "host": the.cluster_domain, + "http": { + "paths": [ + { + "path": "/", + "pathType": "Prefix", + "backend": { + "service": { + "name": main_service, + "port": {"number": 80}, + } + }, + } + ] + }, + }, + { + "host": f"api.{the.cluster_domain}", + "http": { + "paths": [ + { + "path": "/", + "pathType": "Prefix", + "backend": { + "service": { + "name": "core-api-svc", + "port": {"number": 80}, + } + }, + } + ] + }, + }, + { + "host": f"cdn.{the.cluster_domain}", + "http": { + "paths": [ + { + "path": "/", + "pathType": "Prefix", + "backend": { + "service": { + "name": "core-static-pages-svc", + "port": {"number": 80}, + } + }, + } + ] + }, + }, + ] + + if the.config["flower_service"]: + rules.append( + { + "host": f"flower.{the.cluster_domain}", + "http": { + "paths": [ + { + "path": "/", + "pathType": "Prefix", + "backend": { + "service": { + "name": "core-flower-svc", + "port": {"number": 80}, + } + }, + } + ] + }, + } + ) + + if the.config["expose_dbt_api"]: + rules.append( + { + "host": f"dbt.{the.cluster_domain}", + "http": { + "paths": [ + { + "path": "/", + "pathType": "Prefix", + "backend": { + "service": { + "name": "core-dbt-api-svc", + "port": {"number": 80}, + } + }, + } + ] + }, + } + ) + + tls = [] + + if cert_manager_issuer: + for rule in rules: + host = rule["host"] + tls.append( + { + "hosts": [host], + "secretName": host.replace(".", "-"), + } + ) + else: + root_tls = the.config["root_tls_secret_name"] + wildcard_tls = the.config["wildcard_tls_secret_name"] + if root_tls and wildcard_tls: + for rule in rules: + host = rule["host"] + tls.append( + { + "hosts": [host], + "secretName": ( + root_tls if host == the.cluster_domain else wildcard_tls + ), + } + ) + + if the.config["ssl_redirect"]: + annotations = { + "nginx.ingress.kubernetes.io/force-ssl-redirect": True, + "nginx.ingress.kubernetes.io/ssl-redirect": True, + } + else: + annotations = {} + + dns_url = the.config.get("external_dns_url") + if dns_url: + annotations["external-dns.alpha.kubernetes.io/alias"] = True + annotations["external-dns.alpha.kubernetes.io/target"] = dns_url + + if cert_manager_issuer: + annotations["cert-manager.io/cluster-issuer"] = cert_manager_issuer + + return { + "apiVersion": "networking.k8s.io/v1", + "kind": "Ingress", + "metadata": { + 
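When a cert-manager issuer is configured, `gen_ingress` above derives one TLS secret name per host simply by replacing dots with dashes. For example (hosts are illustrative):

```python
# Host-to-secret-name mapping used by gen_ingress when cert_manager_issuer is set.
hosts = ["datacoves.example.com", "api.datacoves.example.com", "cdn.datacoves.example.com"]

tls = [{"hosts": [host], "secretName": host.replace(".", "-")} for host in hosts]

for entry in tls:
    print(entry["hosts"][0], "->", entry["secretName"])
# datacoves.example.com -> datacoves-example-com
# api.datacoves.example.com -> api-datacoves-example-com
# cdn.datacoves.example.com -> cdn-datacoves-example-com
```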
"name": "datacoves-core-ingress", + "annotations": annotations, + }, + "spec": { + "rules": rules, + "tls": tls, + "ingressClassName": "nginx", + }, + } + + +def gen_kustomization(resources): + sgen = [ + { + "name": "core-api-env", + "type": "Opaque", + "envs": ["core-api.env"], + } + ] + + if the.config["enable_dbt_api"]: + sgen.append( + { + "name": "core-dbt-api-env", + "type": "Opaque", + "envs": ["core-dbt-api.env"], + }, + ) + + if the.config["generate_docker_secret"]: + sgen.append( + { + "name": the.config["docker_config_secret_name"], + "type": "kubernetes.io/dockerconfigjson", + "files": [".dockerconfigjson=docker-config.secret.json"], + "options": {"disableNameSuffixHash": True}, + } + ) + + kustomization = { + "apiVersion": "kustomize.config.k8s.io/v1beta1", + "kind": "Kustomization", + "namespace": NAMESPACE_CORE, + "resources": resources, + "secretGenerator": sgen, + } + + return kustomization + + +def get_wait_for_init_container( + wait_for: WaitFor, + image: str, + volume_mounts: dict = None, + env: list = None, + env_from: list = None, + command: str = None, +) -> dict: + if wait_for == WaitFor.DATABASE: + name = "wait-for-db" + _command = command or "./manage.py wait_for_db" + + elif wait_for == WaitFor.REDIS: + name = "wait-for-redis" + _command = command or "./manage.py wait_for_redis" + + elif wait_for == WaitFor.DJANGO_MODEL: + name = "wait-for-django-model" + _command = command or "./manage.py wait_for_model" + + else: + name = "wait-for" + _command = command or "echo Datacoves" + + init_container_spec = { + "name": name, + "image": image, + "imagePullPolicy": "IfNotPresent", + "command": ["sh", "-c", _command], + "envFrom": [{"secretRef": {"name": "core-api-env"}}], + } + + if env: + init_container_spec["env"] = env + + if env_from: + init_container_spec["envFrom"] = env_from + + if volume_mounts: + init_container_spec["volumeMounts"] = volume_mounts + + return init_container_spec + + +def gen_k8s_monitor(): + image = the.docker_image_name_and_tag("datacovesprivate/sidecar-k8s-monitor") + container = { + "name": DatacovesCoreK8sName.K8S_MONITOR.value, + "image": image, + "imagePullPolicy": "IfNotPresent", + "command": ["datacoves"], + "args": ["deployments-status"], + "env": [ + { + "name": "REDIS_URL", + "value": "redis://redis-master.core.svc.cluster.local:6379/1", + }, + ], + } + + volumes = [] + volume_mounts = [] + if the.config["local_api_volume"]: + volumes.append( + { + "name": f"core-{DatacovesCoreK8sName.API.value}-volume", + "hostPath": {"type": "Directory", "path": "/mnt/core-api"}, + } + ) + volume_mounts.append( + { + "mountPath": "/usr/src/app", + "name": f"core-{DatacovesCoreK8sName.API.value}-volume", + } + ) + + env = [ + {"name": "BASE_DOMAIN", "value": the.cluster_domain}, + {"name": "VERSION", "value": image.split(":")[1]}, + {"name": "RELEASE", "value": the.config["release"]}, + ] + env_from = [{"secretRef": {"name": "core-api-env"}}] + init_container = get_wait_for_init_container( + wait_for=WaitFor.REDIS, + image=the.docker_image_name_and_tag("datacovesprivate/core-api"), + env=env, + env_from=env_from, + volume_mounts=volume_mounts, + ) + + if the.config["defines_resource_requests"]: + container["resources"] = { + "requests": {"memory": "200Mi", "cpu": "300m"}, + "limits": {"memory": "500Mi", "cpu": "600m"}, + } + + return gen_deployment( + name=DatacovesCoreK8sName.K8S_MONITOR, + service_account=DatacovesCoreK8sName.API.value, + containers=[container], + init_containers=[init_container], + volumes=volumes, + ) + + +def gen_core_api(): + if 
the.config["dont_use_uwsgi"] or the.config["tests_runner"]: + host_alias = { + "ip": kubectl_output( + "-n ingress-nginx get svc ingress-nginx-controller " + "-o jsonpath='{.spec.clusterIP}'" + ).replace("'", ""), + "hostnames": [ + f"api.{the.cluster_domain}", + f"{the.cluster_domain}", + f"tst001.{the.cluster_domain}", + f"authenticate-tst001.{the.cluster_domain}", + f"john-transform-tst001.{the.cluster_domain}", + f"superset-tst001.{the.cluster_domain}", + f"airflow-tst001.{the.cluster_domain}", + f"airbyte-tst001.{the.cluster_domain}", + f"john-dbt-docs-tst001.{the.cluster_domain}", + f"dbt-docs-tst001.{the.cluster_domain}", + ], + } + return gen_django_service( + DatacovesCoreK8sName.API, + service_port=8000, + container_args=["local"], + host_alias=host_alias, + pdb=the.config["defines_pdb"], + ) + else: + return gen_django_service( + DatacovesCoreK8sName.API, + service_port=8000, + hpa=the.config["defines_resource_requests"], + pdb=the.config["defines_pdb"], + ) + + +def gen_core_dbt_api(): + if the.config["dont_use_uwsgi"] or the.config["tests_runner"]: + host_alias = { + "ip": kubectl_output( + "-n ingress-nginx get svc ingress-nginx-controller " + "-o jsonpath='{.spec.clusterIP}'" + ).replace("'", ""), + "hostnames": [f"dbt.{the.cluster_domain}"], + } + return gen_elixir_service( + DatacovesCoreK8sName.DBT_API, + service_port=4000, + container_args=["local"], + host_alias=host_alias, + pdb=the.config["defines_pdb"], + ) + else: + return gen_elixir_service( + DatacovesCoreK8sName.DBT_API, + service_port=4000, + hpa=the.config["defines_resource_requests"], + pdb=the.config["defines_pdb"], + ) + + +def gen_core_worker(name, queue): + mode = "worker-reload" if the.config["celery_worker_autoreload"] else "worker" + return gen_django_service( + name, + container_args=[mode, queue], + pdb=the.config["defines_pdb"], + # hpa=the.config["defines_resource_requests"], + # commented out since HPA is killing workers while running tasks + # TODO: make a gracefull termination of worker pods for the HPA + ) + + +def gen_core_beat(): + return gen_django_service( + name=DatacovesCoreK8sName.BEAT, + container_args=["beat"], + pdb=the.config["defines_pdb"], + ) + + +def gen_core_flower(): + return gen_django_service( + name=DatacovesCoreK8sName.FLOWER, service_port=5555, container_args=["flower"] + ) + + +def gen_core_api_service_monitor(): + if k8s_utils.exists_namespace(ns=NAMESPACE_OBSERVABILITY): + return { + "apiVersion": "monitoring.coreos.com/v1", + "kind": "ServiceMonitor", + "metadata": { + "name": DatacovesCoreK8sName.API.value, + "namespace": NAMESPACE_CORE, + "labels": { + "app": DatacovesCoreK8sName.API.value, + "release": "prometheus", + }, + }, + "spec": { + "namespaceSelector": {"matchNames": [NAMESPACE_CORE]}, + "selector": { + "matchLabels": { + "app": DatacovesCoreK8sName.API.value, + } + }, + "endpoints": [ + { + "port": "http", + "interval": "30s", + "bearerTokenSecret": { + "name": "api-core-service-account", + "key": "token", + }, + "relabelings": [ + {"targetLabel": "app", "replacement": "datacoves-core-api"} + ], + } + ], + }, + } + + return None + + +def gen_core_flower_service_monitor(): + if the.config["flower_service"] and k8s_utils.exists_namespace( + ns=NAMESPACE_OBSERVABILITY + ): + return { + "apiVersion": "monitoring.coreos.com/v1", + "kind": "ServiceMonitor", + "metadata": { + "name": DatacovesCoreK8sName.FLOWER.value, + "namespace": NAMESPACE_CORE, + "labels": { + "app": DatacovesCoreK8sName.FLOWER.value, + "release": "prometheus", + }, + }, + "spec": { + 
"namespaceSelector": {"matchNames": [NAMESPACE_CORE]}, + "selector": { + "matchLabels": { + "app": DatacovesCoreK8sName.FLOWER.value, + } + }, + "endpoints": [{"port": "http", "interval": "15s"}], + }, + } + + return None + + +def gen_django_service( # noqa: C901 + name: DatacovesCoreK8sName, + service_port=None, + container_args=None, + hpa=False, + pdb=False, + host_alias=None, +): + volumes = [] + volume_mounts = [] + + if name in (DatacovesCoreK8sName.WORKER_MAIN, DatacovesCoreK8sName.WORKER_LONG): + # Implementing worker restarts after redis connection lost based on this thread: + # https://github.com/celery/celery/discussions/7276#discussioncomment-7315040 + service_probe = { + "exec": { + "command": [ + "bash", + "-c", + "celery -A datacoves inspect ping -d celery@$HOSTNAME", + ] + }, + "initialDelaySeconds": 30, + "periodSeconds": 30, + "timeoutSeconds": 30, + "failureThreshold": 3, + "successThreshold": 1, + } + + elif service_port and name not in (DatacovesCoreK8sName.FLOWER,): + service_probe_path = ( + "/healthz/" if name == DatacovesCoreK8sName.API else "/healthcheck/" + ) + service_probe = { + "httpGet": { + "httpHeaders": [ + { + "name": "Host", + "value": f"api.{the.cluster_domain}", + } + ], + "path": service_probe_path, + "port": "http", + "scheme": "HTTP", + }, + "initialDelaySeconds": 45, + "periodSeconds": 10, + "timeoutSeconds": 5, + "failureThreshold": 3, + "successThreshold": 1, + } + + else: + service_probe = { + "exec": {"command": ["/usr/src/app/manage.py", "check"]}, + "initialDelaySeconds": 60, + "periodSeconds": 60, + "timeoutSeconds": 30, + "failureThreshold": 3, + "successThreshold": 1, + } + + if the.config["local_api_volume"]: + volumes.append( + { + "name": f"core-{name.value}-volume", + "hostPath": {"type": "Directory", "path": "/mnt/core-api"}, + } + ) + volume_mounts.append( + {"mountPath": "/usr/src/app", "name": f"core-{name.value}-volume"} + ) + + image = the.docker_image_name_and_tag("datacovesprivate/core-api") + env = [ + {"name": "BASE_DOMAIN", "value": the.cluster_domain}, + {"name": "VERSION", "value": image.split(":")[1]}, + {"name": "RELEASE", "value": the.config["release"]}, + ] + env_from = [{"secretRef": {"name": "core-api-env"}}] + container = { + "name": name.value, + "image": image, + "imagePullPolicy": "IfNotPresent", + "env": env, + "envFrom": env_from, + } + + setup_probes = False + if ( + the.config["core_liveness_readiness"] + and name == DatacovesCoreK8sName.API + and not the.config["local_api_volume"] + ): + setup_probes = True + + elif the.config["core_liveness_readiness"] and name != DatacovesCoreK8sName.API: + setup_probes = True + + if setup_probes: + container.update( + {"readinessProbe": service_probe, "livenessProbe": service_probe} + ) + + if volume_mounts: + container["volumeMounts"] = volume_mounts + + if container_args: + container["args"] = container_args + + if the.config["defines_resource_requests"]: + container["resources"] = { + "requests": {"memory": "500Mi", "cpu": "100m"}, + "limits": {"memory": "1Gi", "cpu": "300m"}, + } + + init_containers = [] + if name == DatacovesCoreK8sName.API: + init_container = get_wait_for_init_container( + wait_for=WaitFor.DATABASE, + image=image, + env=env, + env_from=env_from, + volume_mounts=volume_mounts, + ) + init_containers.append(init_container) + + elif name in ( + DatacovesCoreK8sName.WORKER_MAIN, + DatacovesCoreK8sName.WORKER_LONG, + DatacovesCoreK8sName.BEAT, + DatacovesCoreK8sName.FLOWER, + ): + init_container = get_wait_for_init_container( + wait_for=WaitFor.REDIS, + 
image=image, + env=env, + env_from=env_from, + volume_mounts=volume_mounts, + ) + init_containers.append(init_container) + + if name == DatacovesCoreK8sName.BEAT: + init_container = get_wait_for_init_container( + wait_for=WaitFor.DJANGO_MODEL, + image=image, + env=env, + env_from=env_from, + volume_mounts=volume_mounts, + command="./manage.py wait_for_model --has-records true", + ) + init_containers.append(init_container) + + containers = [container] + if service_port: + container["ports"] = [ + {"containerPort": service_port, "protocol": "TCP", "name": "http"} + ] + + return gen_deployment_and_service( + name=name, + containers=containers, + init_containers=init_containers, + service_account=DatacovesCoreK8sName.API.value, + target_port=service_port, + volumes=volumes, + hpa=hpa, + pdb=pdb, + host_alias=host_alias, + ) + else: + return gen_deployment( + name=name, + service_account=DatacovesCoreK8sName.API.value, + containers=containers, + init_containers=init_containers, + volumes=volumes, + hpa=hpa, + pdb=pdb, + host_alias=host_alias, + ) + + +def gen_elixir_service( + name, service_port=None, container_args=None, hpa=False, pdb=False, host_alias=None +): + volumes = [] + volume_mounts = [] + + if service_port: + serviceProbePath = "/api/internal/healthcheck" + serviceProbe = { + "failureThreshold": 3, + "httpGet": { + "httpHeaders": [ + { + "name": "Host", + "value": f"dbt.{the.cluster_domain}", + }, + ], + "path": serviceProbePath, + "port": "http", + "scheme": "HTTP", + }, + "initialDelaySeconds": 60, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 5, + } + + image = the.docker_image_name_and_tag("datacovesprivate/core-dbt-api") + container = { + "name": name.value, + "image": image, + "imagePullPolicy": "IfNotPresent", + "env": [ + {"name": "PHX_HOST", "value": the.cluster_domain}, + ], + "envFrom": [{"secretRef": {"name": "core-dbt-api-env"}}], + } + + if the.config["local_dbt_api_volume"]: + volumes.append( + { + "name": f"core-{name.value}-volume", + "hostPath": {"type": "Directory", "path": f"/mnt/core-{name.value}"}, + } + ) + volume_mounts.append( + {"mountPath": "/home/runner/app", "name": f"core-{name.value}-volume"} + ) + + if the.config["core_liveness_readiness"]: + container.update( + {"livenessProbe": serviceProbe, "readinessProbe": serviceProbe} + ) + + if volume_mounts: + container["volumeMounts"] = volume_mounts + + if container_args: + container["args"] = container_args + + if the.config["defines_resource_requests"]: + container["resources"] = { + "requests": {"memory": "200Mi", "cpu": "100m"}, + "limits": {"memory": "800Mi", "cpu": "500m"}, + } + + init_container = get_wait_for_init_container( + wait_for=WaitFor.DJANGO_MODEL, + image=the.docker_image_name_and_tag("datacovesprivate/core-api"), + env=[ + {"name": "BASE_DOMAIN", "value": the.cluster_domain}, + {"name": "VERSION", "value": image.split(":")[1]}, + {"name": "RELEASE", "value": the.config["release"]}, + ], + env_from=[{"secretRef": {"name": "core-api-env"}}], + command="./manage.py wait_for_model --has-records true", + ) + + if service_port: + container["ports"] = [ + {"containerPort": service_port, "protocol": "TCP", "name": "http"} + ] + + return gen_deployment_and_service( + name=name, + containers=[container], + init_containers=[init_container], + service_account=DatacovesCoreK8sName.API.value, + target_port=service_port, + volumes=volumes, + hpa=hpa, + pdb=pdb, + host_alias=host_alias, + ) + else: + return gen_deployment( + name=name, + 
service_account=DatacovesCoreK8sName.API.value, + init_containers=[init_container], + containers=[container], + volumes=volumes, + hpa=hpa, + pdb=pdb, + host_alias=host_alias, + ) + + +def gen_core_workbench(): + image = the.docker_image_name_and_tag("datacovesprivate/core-workbench") + image_policy = "IfNotPresent" + port = 80 + volumes = [] + volume_mounts = [] + + serviceProbe = { + "failureThreshold": 3, + "httpGet": {"path": "/", "port": "http", "scheme": "HTTP"}, + "initialDelaySeconds": 60, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 1, + } + + if the.config["local_workbench_image"]: + image = "datacovesprivate/core-workbench-local:latest" + + if the.config["local_workbench_volume"]: + port = 3000 + volumes.append( + { + "name": "core-workbench-volume", + "hostPath": {"type": "Directory", "path": "/mnt/core-workbench"}, + } + ) + volume_mounts.append( + { + "name": "core-workbench-volume", + "mountPath": "/usr/src/app", + } + ) + + container = { + "name": DatacovesCoreK8sName.WORKBENCH.value, + "image": image, + "imagePullPolicy": image_policy, + "ports": [{"containerPort": port, "protocol": "TCP", "name": "http"}], + } + if volume_mounts: + container["volumeMounts"] = volume_mounts + + if the.config["defines_resource_requests"]: + container["resources"] = { + "requests": {"memory": "100Mi", "cpu": "50m"}, + "limits": {"memory": "300Mi", "cpu": "200m"}, + } + + if the.config["core_liveness_readiness"]: + container.update( + {"livenessProbe": serviceProbe, "readinessProbe": serviceProbe} + ) + + return gen_deployment_and_service( + name=DatacovesCoreK8sName.WORKBENCH, + containers=[container], + target_port=port, + volumes=volumes, + hpa=the.config["defines_resource_requests"], + pdb=the.config["defines_pdb"], + ) + + +def gen_deployment_and_service( + name: DatacovesCoreK8sName, + containers, + init_containers=None, + service_account=NAMESPACE_CORE, + target_port=80, + volumes=[], + node_selector=the.GENERAL_NODE_SELECTOR, + hpa=False, + pdb=False, + host_alias=None, +): + return gen_service(name, target_port) + gen_deployment( + name=name, + service_account=service_account, + containers=containers, + init_containers=init_containers, + volumes=volumes, + node_selector=node_selector, + hpa=hpa, + pdb=pdb, + host_alias=host_alias, + ) + + +def gen_service(name: DatacovesCoreK8sName, target_port, name_port="http"): + labels = {"app": name.value} + return [ + { + "apiVersion": "v1", + "kind": "Service", + "metadata": { + "name": f"core-{name.value}-svc", + "labels": {"app": name.value}, + }, + "spec": { + "selector": labels, + "ports": [ + { + "name": name_port, + "port": 80, + "protocol": "TCP", + "targetPort": target_port, + } + ], + }, + } + ] + + +def gen_core_static_pages_configmap(data: dict = {}): + configmap = { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": DatacovesCoreK8sName.STATIC_PAGES.value, + }, + "data": data, + } + + return configmap + + +def gen_core_static_pages(): + image = the.docker_image_name_and_tag("datacovesprivate/core-static-pages") + image_policy = "IfNotPresent" + port = 80 + + serviceProbe = { + "failureThreshold": 3, + "httpGet": {"path": "/", "port": "http", "scheme": "HTTP"}, + "initialDelaySeconds": 60, + "periodSeconds": 10, + "successThreshold": 1, + "timeoutSeconds": 1, + } + + container = { + "name": DatacovesCoreK8sName.STATIC_PAGES.value, + "image": image, + "imagePullPolicy": image_policy, + "ports": [{"containerPort": port, "protocol": "TCP", "name": "http"}], + "envFrom": [ + {"configMapRef": 
{"name": DatacovesCoreK8sName.STATIC_PAGES.value}} + ], + } + + if the.config["defines_resource_requests"]: + container["resources"] = { + "requests": {"memory": "50Mi", "cpu": "20m"}, + "limits": {"memory": "250Mi", "cpu": "250m"}, + } + + if the.config["core_liveness_readiness"]: + container.update( + {"livenessProbe": serviceProbe, "readinessProbe": serviceProbe} + ) + + return gen_deployment_and_service( + name=DatacovesCoreK8sName.STATIC_PAGES, + containers=[container], + ) + + +def gen_deployment( + name: DatacovesCoreK8sName, + service_account, + containers, + volumes, + init_containers=None, + node_selector=the.GENERAL_NODE_SELECTOR, + hpa=False, + pdb=False, + host_alias=None, +): + labels = {"app": name.value, "application-id": the.config["application_id"]} + meta = {"name": name.value, "labels": labels} + template_spec = { + "serviceAccountName": service_account, + "containers": containers, + } + if init_containers: + template_spec["initContainers"] = init_containers + + if volumes: + template_spec["volumes"] = volumes + if node_selector: + template_spec["nodeSelector"] = node_selector + if host_alias: + template_spec["hostAliases"] = [host_alias] + + resources = [ + { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": meta, + "spec": { + "replicas": the.config.get( + f"min_replicas_{name.value.replace('-', '_')}", 1 + ), + "selector": {"matchLabels": labels}, + "template": { + "metadata": meta, + "spec": template_spec, + }, + }, + } + ] + + if hpa: + resources.append(gen_hpa(name.value)) + + if pdb: + resources.append(gen_pdb(name.value)) + + return resources + + +def gen_hpa(name: str): + return { + "apiVersion": "autoscaling/v2", + "kind": "HorizontalPodAutoscaler", + "metadata": {"name": name}, + "spec": { + "scaleTargetRef": { + "apiVersion": "apps/v1", + "kind": "Deployment", + "name": name, + }, + "minReplicas": the.config.get(f"min_replicas_{name.replace('-', '_')}", 1), + "maxReplicas": 5, + "metrics": [ + { + "type": "Resource", + "resource": { + "name": "cpu", + "target": {"type": "Utilization", "averageUtilization": 60}, + }, + } + ], + # We want to scale up no more than 1 pod every 5 minutes + "behavior": { + "scaleUp": { + "policies": [{"type": "Pods", "value": 1, "periodSeconds": 300}], + "selectPolicy": "Min", + } + }, + }, + } + + +def gen_pdb(name): + return { + "apiVersion": "policy/v1", + "kind": "PodDisruptionBudget", + "metadata": { + "name": name, + "namespace": NAMESPACE_CORE, + }, + "spec": { + "minAvailable": 1, + "selector": { + "matchLabels": { + "app": name, + } + }, + }, + } + + +def gen_workspace_editor_role(): + read_verbs = ["get", "list", "watch"] + edit_verbs = read_verbs + ["create", "delete", "patch", "update"] + return { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRole", + "metadata": {"name": "workspace-editor-role"}, + "rules": [ + { + "apiGroups": [""], + "resources": [ + "namespaces", + "namespaces/finalize", + "secrets", + "configmaps", + "persistentvolumes", + "persistentvolumeclaims", + "resourcequotas", + "limitranges", + # NOTE: The pods edit permissions are required to launch + # kaniko build pods in the core ns. Perhaps we should + # restrict this permission just to that ns. On the other + # hand it might be useful for other things. 
+ "pods", + "pods/log", + "pods/exec", + "endpoints", + "events", + "nodes", + "serviceaccounts", + ], + "verbs": edit_verbs, + }, + { + "apiGroups": ["rbac.authorization.k8s.io"], + "resources": ["roles", "rolebindings"], + "verbs": edit_verbs, + }, + { + "apiGroups": ["admissionregistration.k8s.io"], + "resources": ["validatingwebhookconfigurations"], + "verbs": edit_verbs, + }, + # To install the efs storage class for airflow. + { + "apiGroups": ["storage.k8s.io"], + "resources": ["storageclasses"], + "verbs": edit_verbs, + }, + { + "apiGroups": ["datacoves.com"], + "resources": ["accounts", "workspaces"], + "verbs": edit_verbs, + }, + { + "apiGroups": ["datacoves.com"], + "resources": ["accounts/status", "workspaces/status"], + "verbs": read_verbs, + }, + { + "apiGroups": ["apps"], + "resources": [ + "deployments", + "deployments/status", + "statefulsets", + "statefulsets/status", + ], + "verbs": edit_verbs, + }, + # To read ingress controller ip + { + "apiGroups": [""], + "resources": ["services"], + "verbs": read_verbs, + }, + # To monitoring + { + "apiGroups": ["monitoring.coreos.com"], + "resources": ["servicemonitors"], + "verbs": edit_verbs, + }, + # To overprovisioning + { + "apiGroups": ["scheduling.k8s.io"], + "resources": ["priorityclasses"], + "verbs": edit_verbs, + }, + # To jobs and cron jobs + { + "apiGroups": ["batch"], + "resources": ["cronjobs"], + "verbs": edit_verbs, + }, + ], + } + + +def gen_role_binding(): + return { + "apiVersion": "rbac.authorization.k8s.io/v1", + "kind": "ClusterRoleBinding", + "metadata": {"name": "workspace-editor-rolebinding"}, + "subjects": [ + { + "kind": "ServiceAccount", + "name": DatacovesCoreK8sName.API.value, + "namespace": NAMESPACE_CORE, + } + ], + "roleRef": { + "kind": "ClusterRole", + "name": "workspace-editor-role", + "apiGroup": "rbac.authorization.k8s.io", + }, + } + + +def setup_redis(): + """ + - https://redis.io/docs/management/config/ + - https://artifacthub.io/packages/helm/bitnami/redis/17.9.0 + """ + + redis_image = the.docker_image_name_and_tag("bitnami/redis") + redis_registry, redis_repository, redis_tag = parse_image_uri(redis_image) + redis_values = ".generated/redis-values.yaml" + data = { + "architecture": "standalone", + "auth": {"enabled": False}, + "global": {"imagePullSecrets": [the.config["docker_config_secret_name"]]}, + "commonLabels": { + "datacoves.com/adapter": "core", + "application-id": the.config["application_id"], + }, + "master": { + "persistence": {"enabled": False}, + "nodeSelector": the.GENERAL_NODE_SELECTOR, + "configuration": "maxmemory 500mb\nmaxmemory-policy allkeys-lru", + "resources": { + "limits": { + "cpu": "300m", + "memory": "500Mi", + }, + "requests": { + "cpu": "50m", + "memory": "200Mi", + }, + }, + }, + "image": { + "registry": redis_registry, + "repository": redis_repository, + "tag": redis_tag, + "pullSecrets": [the.config["docker_config_secret_name"]], + }, + "pdb": {"create": the.config["defines_pdb"]}, + } + + if k8s_utils.exists_namespace(ns=NAMESPACE_OBSERVABILITY): + redis_metrics_image = the.docker_image_name_and_tag("bitnami/redis-exporter") + ( + redis_metrics_registry, + redis_metrics_repository, + redis_metrics_tag, + ) = parse_image_uri(redis_metrics_image) + data.update( + { + "metrics": { + "enabled": True, + "serviceMonitor": { + "enabled": True, + "additionalLabels": {"release": NAMESPACE_OBSERVABILITY}, + }, + "image": { + "registry": redis_metrics_registry, + "repository": redis_metrics_repository, + "tag": redis_metrics_tag, + "pullSecrets": 
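`setup_redis` above splits the resolved image reference with `lib.tools.parse_image_uri`, which is not shown in this diff. Presumably it breaks a `registry/repository:tag` string into its three parts; a stand-in with that assumed behavior:

```python
# Hypothetical stand-in for lib.tools.parse_image_uri; the real helper is not
# included in this diff, so the exact semantics are an assumption.
def parse_image_uri(image: str):
    """Split 'registry/repository:tag' into (registry, repository, tag)."""
    name, _, tag = image.rpartition(":")
    registry, _, repository = name.partition("/")
    return registry, repository, tag


print(parse_image_uri("docker.io/bitnami/redis:7.2.4-debian-12-r9"))
# ('docker.io', 'bitnami/redis', '7.2.4-debian-12-r9')
```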
[the.config["docker_config_secret_name"]], + }, + "resources": { + "limits": { + "cpu": "100m", + "memory": "300Mi", + }, + "requests": { + "cpu": "10m", + "memory": "100Mi", + }, + }, + } + }, + ) + + else: + print("Redis metrics disabled") + + write_yaml(redis_values, data) + helm( + "-n core upgrade --install redis oci://registry-1.docker.io/bitnamicharts/redis --version 19.3.0", + "-f", + redis_values, + ) + + +def setup_postgres(): + postgres_image = the.docker_image_name_and_tag("bitnami/postgresql") + postgres_registry, postgres_repository, postgres_tag = parse_image_uri( + postgres_image + ) + postgres_values = ".generated/postgres-values.yaml" + data = { + "architecture": "standalone", + "global": {"imagePullSecrets": [the.config["docker_config_secret_name"]]}, + "commonLabels": { + "datacoves.com/adapter": "core", + "application-id": the.config["application_id"], + }, + "image": { + "registry": postgres_registry, + "repository": postgres_repository, + "tag": postgres_tag, + "pullSecrets": [the.config["docker_config_secret_name"]], + }, + "auth": { + "database": the.config["core_postgres_config"]["name"], + "postgresPassword": the.config["core_postgres_config"]["password"], + "username": the.config["core_postgres_config"]["username"], + "password": the.config["core_postgres_config"]["password"], + }, + "primary": { + "nodeSelector": the.VOLUMED_NODE_SELECTOR, + "resources": { + "limits": {"cpu": "1", "memory": "1Gi"}, + "requests": { + "cpu": "100m", + "memory": "200Mi", + }, + }, + "persistence": {"size": "5Gi", "accessModes": ["ReadWriteOnce"]}, + }, + } + write_yaml(postgres_values, data) + helm( + "-n core upgrade --install postgres oci://registry-1.docker.io/bitnamicharts/postgresql --version 15.2.9", + "-f", + postgres_values, + ) + + +def setup_minio(): + image = the.docker_image_name_and_tag("bitnami/minio") + registry, repository, tag = parse_image_uri(image) + values = ".generated/core-minio-values.yaml" + + data = { + "architecture": "standalone", + "global": {"imagePullSecrets": [the.config["docker_config_secret_name"]]}, + "commonLabels": { + "datacoves.com/adapter": "core", + "application-id": the.config["application_id"], + }, + "image": { + "registry": registry, + "repository": repository, + "tag": tag, + "pullSecrets": [the.config["docker_config_secret_name"]], + }, + "auth": { + "rootUser": the.config["core_minio_config"]["username"], + "rootPassword": the.config["core_minio_config"]["password"], + }, + "defaultBuckets": the.config["core_minio_config"]["bucket"], + "imagePullPolicy": "IfNotPresent", + "primary": { + "nodeSelector": the.VOLUMED_NODE_SELECTOR, + "persistence": {"size": "5Gi", "accessModes": ["ReadWriteOnce"]}, + }, + } + + if the.config["defines_resource_requests"]: + data["primary"]["resources"] = { + "limits": {"cpu": "400m", "memory": "300mi"}, + "requests": { + "cpu": "100m", + "memory": "200Mi", + }, + } + + write_yaml(values, data) + helm( + "-n core upgrade --install minio oci://registry-1.docker.io/bitnamicharts/minio --version 11.x.x", + "-f", + values, + ) + + +def delete_deprecated_deployments(deploys_deprecated: list): + """ + Look for the deployments deprecated and give the option to remove them. 
+ """ + deployments = k8s_utils.get_deployments(ns=NAMESPACE_CORE) + if not deployments or not deploys_deprecated: + return + + deploys_to_delete = set(deployments).intersection(deploys_deprecated) + if deploys_to_delete: + console.print_title("Validating deprecated deployments in namespace core") + selected = questionary.checkbox( + message="Remove deprecated deployments", + choices=[ + Choice(deploy, value=deploy, checked=True) + for deploy in deploys_to_delete + ], + ).ask() + + if selected: + for deploy in selected: + kubectl(f"-n core delete deployment {deploy}") + delete_volumes_unused(key_contains=deploy) + + +def delete_deprecated_helm_charts(helm_charts_deprecated: list): + """ + Look for the installed helm charts deprecated and give the option to remove them. + """ + charts_installed = helm_utils.get_charts_installed(ns=NAMESPACE_CORE) + if not charts_installed or not helm_charts_deprecated: + return + + charts_installed = map(lambda chart: chart[1], charts_installed) + charts_to_delete = set(charts_installed).intersection(helm_charts_deprecated) + if charts_to_delete: + console.print_title("Validating deprecated helm charts in namespace core") + selected = questionary.checkbox( + message="Remove deprecated helm charts", + choices=[ + Choice(chart, value=chart, checked=True) for chart in charts_to_delete + ], + ).ask() + + if selected: + for chart in selected: + helm(f"-n core uninstall {chart}") + delete_volumes_unused(key_contains=chart) + + +def delete_deprecated_hpas(deprecated_hpas: list): + """ + Look for the hpas deprecated and give the option to remove them. + """ + hpas = k8s_utils.get_hpas(ns=NAMESPACE_CORE) + if not hpas or not deprecated_hpas: + return + + to_delete = set(hpas).intersection(deprecated_hpas) + if to_delete: + console.print_title("Validating deprecated hpas in namespace core") + selected = questionary.checkbox( + message="Remove deprecated hpas", + choices=[Choice(hpa, value=hpa, checked=True) for hpa in to_delete], + ).ask() + + if selected: + for hpa in selected: + kubectl(f"-n core delete hpa {hpa}") + + +def delete_volumes_unused(key_contains: str): + filtered = filter( + lambda volume: volume.pvc_namespace == NAMESPACE_CORE + and key_contains in volume.pvc_name, + volumes.get_pvs(), + ) + for volume in list(filtered): + if questionary.confirm( + message=( + "Do you want to remove the volume " + f"pv=[{volume.name}] " + f"pvc=[{volume.pvc_namespace}/{volume.pvc_name}]?" + ) + ).ask(): + kubectl(f"-n core delete pvc {volume.pvc_name}") + kubectl(f"delete pv {volume.name}") + + +def build_and_deploy_static_files(release: str): + """This method will build the static files in a currently running + cluster, and then push them to S3. It requires that the AWS credentials + be in the environment. + + release should be the release we're deploying static files for + """ + + aws_access_key_id = os.environ.get("AWS_ACCESS_KEY_ID") + aws_secret_access_key = os.environ.get("AWS_SECRET_ACCESS_KEY") + + if not aws_access_key_id or not aws_secret_access_key: + raise Exception("AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY must be set.") + + api_pod = k8s_utils.pod_for_deployment( + ns=NAMESPACE_CORE, deployment=DatacovesCoreK8sName.API.value + ) + + if not api_pod: + raise Exception("API pod is not running, cannot continue") + + run_in_api_pod = k8s_utils.cmd_runner_in_pod( + NAMESPACE_CORE, api_pod, container=DatacovesCoreK8sName.API.value + ) + + # Clean garbage if we have it, so we just have the version number. 
+ release = re.sub(r"^[^\d]+", "", release) + + run_in_api_pod("pip install --no-input awscli") + run_in_api_pod("./manage.py collectstatic --noinput") + run_in_api_pod( + [ + "/bin/bash", + "-c", + f'export AWS_ACCESS_KEY_ID="{aws_access_key_id}" && ' + f'export AWS_SECRET_ACCESS_KEY="{aws_secret_access_key}" && ' + f"aws s3 --region us-east-1 sync assets s3://datacoves-us-east-1-core-api-assets/{release}", + ] + ) diff --git a/scripts/setup_operator.py b/scripts/setup_operator.py new file mode 100644 index 00000000..e0ff9718 --- /dev/null +++ b/scripts/setup_operator.py @@ -0,0 +1,136 @@ +import os + +from lib import cmd +from lib.config import config as the +from lib.config_files import emit_yamls, load_file, load_text_file, mkdir +from scripts import setup_base +from scripts.k8s_utils import get_context, kubectl + +outdir = None + + +def setup_operator(cluster_domain): + setup_base.wait_for_base(cluster_domain) + install_crds() + gen_operator(cluster_domain) + kubectl(f"apply -k .generated/operator/{cluster_domain}") + + +def install_crds(): + kubectl("apply -k config/crd", cwd="src/core/operator") + + +def gen_operator(cluster_domain): + params_yaml_path = f"config/{cluster_domain}/cluster-params.yaml" + the.load_cluster_params(params_yaml_path) + + global outdir + outdir = the.OPERATOR_DIR / the.cluster_domain + mkdir(the.OUTPUT_DIR) + mkdir(the.OPERATOR_DIR) + mkdir(outdir) + + files = { + "kustomization.yaml": gen_kustomization(), + "deployment.yaml": gen_deployment_patch(), + "sa-patch.yaml": gen_sa_patch(), + } + + if the.config["generate_docker_secret"]: + files["docker-config.secret.json"] = load_text_file( + the.SECRETS_DIR / "docker-config.secret.json" + ) + + emit_yamls(outdir, files) + + +def gen_kustomization(): + base_dir = the.DATACOVES_DIR / "src/core/operator/config/default" + + sgen = [] + if the.config["generate_docker_secret"]: + sgen.append( + { + "name": the.config["docker_config_secret_name"], + "type": "kubernetes.io/dockerconfigjson", + "files": [".dockerconfigjson=docker-config.secret.json"], + "options": {"disableNameSuffixHash": True}, + } + ) + + return { + "apiVersion": "kustomize.config.k8s.io/v1beta1", + "kind": "Kustomization", + "namespace": "operator-system", + "bases": [os.path.relpath(base_dir, outdir)], + "images": [ + { + "name": "controller", + "newName": the.docker_image_name("datacovesprivate/core-operator"), + "newTag": the.docker_image_tag("datacovesprivate/core-operator"), + }, + { + "name": "gcr.io/kubebuilder/kube-rbac-proxy", + "newName": the.docker_image_name("gcr.io/kubebuilder/kube-rbac-proxy"), + }, + ], + "secretGenerator": sgen, + "patchesStrategicMerge": ["deployment.yaml", "sa-patch.yaml"], + } + + +def gen_deployment_patch(): + env = [] + sentry_dsn = the.config.get("operator_sentry_dsn") + if sentry_dsn: + env += [ + {"name": "SENTRY_DSN", "value": sentry_dsn}, + {"name": "SENTRY_ENVIRONMENT", "value": the.cluster_domain}, + {"name": "SENTRY_RELEASE", "value": the.config["release"]}, + ] + if the.config.get("local_workbench_image"): + env.append({"name": "LOCAL_WORKBENCH_IMAGE", "value": "true"}) + return { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": {"name": "controller-manager", "namespace": "system"}, + "spec": { + "template": { + "spec": { + "containers": [ + {"name": "manager", "env": env}, + ], + "nodeSelector": the.GENERAL_NODE_SELECTOR, + }, + }, + }, + } + + +def gen_sa_patch(): + return { + "apiVersion": "v1", + "kind": "ServiceAccount", + "metadata": {"name": "controller-manager", "namespace": 
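`build_and_deploy_static_files` above normalizes the release name by stripping any non-digit prefix before syncing assets to S3. A couple of worked examples of that regex:

```python
# Effect of the release-name cleanup in build_and_deploy_static_files.
import re

for release in ("v3.1.0", "release-3.1.0", "3.1.0"):
    print(release, "->", re.sub(r"^[^\d]+", "", release))
# v3.1.0 -> 3.1.0
# release-3.1.0 -> 3.1.0
# 3.1.0 -> 3.1.0
```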
"system"}, + "imagePullSecrets": [ + {"name": the.config["docker_config_secret_name"]}, + ], + } + + +def scale_operator(replicas=1): + ns, deployment = "operator-system", "operator-controller-manager" + kubectl( + f"-n {ns} scale --replicas={replicas} --timeout=12s deployment {deployment}" + ) + + +def run_operator(cluster_domain): + """Run the operator from outside the cluster. For development only.""" + setup_base.wait_for_base(cluster_domain) + install_crds() + params_yaml = load_file(f"config/{cluster_domain}/cluster-params.yaml") + local_workbench_image = params_yaml.get("local_workbench_image") + os.environ["LOCAL_WORKBENCH_IMAGE"] = local_workbench_image and "true" or "" + os.environ["HELM_KUBECONTEXT"] = get_context() + cmd.run("make run ENABLE_WEBHOOKS=false", cwd="src/core/operator") diff --git a/scripts/setup_secrets.py b/scripts/setup_secrets.py new file mode 100644 index 00000000..ad76edcb --- /dev/null +++ b/scripts/setup_secrets.py @@ -0,0 +1,635 @@ +import base64 +import difflib +import json +import subprocess +import tempfile +from enum import Enum +from functools import lru_cache +from pathlib import Path + +import questionary +from dateutil import parser +from rich import box +from rich.console import Console +from rich.table import Table + +from lib import cmd + +console = Console() +BASEDIR = Path(__file__).resolve().parent.parent +VAULT = "Engineering - datacoves" +MAPPING_SECRETS_PATH = "secrets/mapping.cfg" + + +class OnePasswordStatus(Enum): + FAILED = 1 + CREATED = 2 + UPDATED = 3 + MERGED = 4 + IGNORED = 5 + + +class OnePasswordItem: + def __init__(self, path: str, ticket: str = None) -> None: + self.__path = Path(path) + self.__ticket = ticket + self.__message = None + self.__status = OnePasswordStatus.CREATED + self.__item_main = None + self.__item_ticket = None + self._load_items_from_one_password() + + @property + def name(self) -> str: + return ( + self.__path.name + if not self.__ticket + else f"{self.__ticket}-{self.__path.name}" + ) + + @property + def absolute_path(self) -> Path: + return BASEDIR / self.__path + + @property + def is_working_in_main_branch(self): + return self.__ticket is None + + @property + def exists_local(self): + return self.absolute_path.is_file() + + @property + def exists_remote_main(self) -> bool: + name_secret = self.__path.name + secrets = retrieve_secrets_from_one_password() + filtered = list(filter(lambda item: item["title"] == name_secret, secrets)) + return len(filtered) > 0 + + @property + def exists_remote_ticket(self) -> bool: + secrets = retrieve_secrets_from_one_password() + filtered = list(filter(lambda item: item["title"] == self.name, secrets)) + return len(filtered) > 0 + + @property + def content_local(self) -> str: + if self.exists_local: + with open(self.absolute_path, "r") as f: + return f.read() + + return None + + @property + def content_remote(self) -> tuple: + content_b64 = None + if self.is_working_in_main_branch and self.exists_remote_main: + name_item = self.__path.name + content_b64 = self.__item_main["content"] + + elif self.exists_remote_ticket: + name_item = self.name + content_b64 = self.__item_ticket["content"] + + elif self.__item_main: + name_item = self.__path.name + content_b64 = self.__item_main["content"] + + else: + # There is none, need to add it. 
+ return self.__path.name, "" + + return name_item, base64.b64decode(content_b64).decode("utf-8") + + @property + def path_to_save(self) -> Path: + return self.__path + + @property + def message(self) -> str: + return self.__message + + @message.setter + def message(self, value: str): + self.__message = value + + @property + def status(self) -> OnePasswordStatus: + return self.__status + + @status.setter + def status(self, value: OnePasswordStatus): + self.__status = value + + def _load_item_detail_from_one_password(self, item_title: str): + """Loads items from 1Password""" + try: + item = subprocess.check_output( + ["op", "item", "get", item_title, "--vault", VAULT, "--format", "json"] + ) + item = json.loads(item) + data = {"id": item["id"]} + for field in item.get("fields"): + data.update({field["label"]: field.get("value")}) + + return data + + except Exception as e: + print(e) + return None + + def _load_items_from_one_password(self): + """Loads items from 1Password""" + + if self.exists_remote_main: + name_secret_main = self.absolute_path.name + self.__item_main = self._load_item_detail_from_one_password( + item_title=name_secret_main + ) + + if not self.is_working_in_main_branch and self.exists_remote_ticket: + self.__item_ticket = self._load_item_detail_from_one_password( + item_title=self.name + ) + + def diff(self, remote_to_local=True) -> list: + """Return lines with differences + + Args: + remote_to_local (bool, optional): True if the process is sync secrets. Defaults to True. + + Returns: + list: Lines with differences + """ + + content_remote_tuple = self.content_remote + name_remote = content_remote_tuple[0] + content_remote = content_remote_tuple[1] + content_local = self.content_local + + if remote_to_local: + from_content = content_local.splitlines() + to_content = content_remote.splitlines() + from_file = f"{self.__path.name} local" + to_file = f"{name_remote} remote" + + else: + from_content = content_remote.splitlines() + to_content = content_local.splitlines() + from_file = f"{name_remote} remote" + to_file = f"{self.__path.name} local" + + diff = difflib.unified_diff( + from_content, + to_content, + fromfile=from_file, + tofile=to_file, + lineterm="", + n=1, + ) + + diff = filter(lambda x: not x.startswith("@@"), diff) + return list(diff) + + def save_local(self): + content = self.content_remote[1] + if content: + Path(self.absolute_path).parent.mkdir(parents=True, exist_ok=True) + with open(self.absolute_path, "w") as f: + f.write(content) + + def save_remote(self): + content_b64 = base64.b64encode(self.content_local.encode("utf-8")).decode( + "utf-8" + ) + name_secret = ( + self.name if not self.is_working_in_main_branch else self.absolute_path.name + ) + + if (self.is_working_in_main_branch and self.exists_remote_main) or ( + not self.is_working_in_main_branch and self.exists_remote_ticket + ): + self.__status = OnePasswordStatus.UPDATED + subprocess.check_output( + [ + "op", + "item", + "edit", + name_secret, + "--vault", + VAULT, + "--tags", + f"cli.py,{self.status.name.lower()}", + f"path_to_save[text]={str(self.path_to_save)}", + f"content[text]={content_b64}", + ] + ) + + else: + self.__status = OnePasswordStatus.CREATED + subprocess.check_output( + [ + "op", + "item", + "create", + "--category", + "Secure Note", + "--title", + name_secret, + "--vault", + VAULT, + "--tags", + f"cli.py,{self.status.name.lower()}", + f"path_to_save[text]={str(self.path_to_save)}", + f"content[text]={content_b64}", + ] + ) + + def clean_remote(self): + """Deletes items in 
1password""" + if not self.is_working_in_main_branch and self.exists_remote_ticket: + subprocess.check_output( + ["op", "item", "delete", self.name, "--vault", VAULT] + ) + + +def reveal_secrets(prompt=True): + """ + Init the reveal process of the secrets. + Use OnePassword cli to get the secrets from a vault and + save them to a file with the corresponding format. + """ + + try: + try: + ticket = get_ticket_number_by_git_branch(prompt=prompt) + except Exception as e: + if not prompt and "You need a ticket number" in str(e): + # This is CI ... let's let it pass + ticket = None + else: + raise + + details = [] + + # Iterate over mapping.cfg + secrets = get_mapping_secrets_path() + for idx, secret_path in enumerate(secrets): + console.print( + f"Revealing secret [[green]{secret_path}[/green]] {idx + 1} of {len(secrets)}" + ) + + secret = OnePasswordItem(path=secret_path, ticket=ticket) + write_secret = True + if prompt and secret.exists_local: + diff = secret.diff() + if diff: + print_diff(diff) + if not questionary.confirm( + f"Do you want overwrite the local changes in [{secret.name}]?", + default=False, + ).ask(): + write_secret = False + secret.status = OnePasswordStatus.IGNORED + secret.message = "Cancelled by user" + + else: + secret.status = OnePasswordStatus.UPDATED + + else: + write_secret = False + secret.status = OnePasswordStatus.IGNORED + secret.message = "There are no changes" + + if write_secret: + secret.save_local() + + details.append(secret) + + # Shows all results from each item (secret) in OnePassword + print_result(title=f"Secrets Revealed from [{VAULT}]", details=details) + + except Exception as e: + console.print_exception(show_locals=True) + console.print(f"[bold red]{e}[/bold red]") + + +def sync_secrets(): + """ + Create or update secrets according to config file. + Use OnePassword cli to create or update secrets for a vault. 
+ """ + try: + ticket = get_ticket_number_by_git_branch() + + secrets = get_mapping_secrets_path() + details = [] + for idx, secret_path in enumerate(secrets): + console.print( + f"Synchronizing secret [[green]{secret_path}[/green]] {idx + 1} of {len(secrets)}" + ) + secret = OnePasswordItem(path=secret_path, ticket=ticket) + secret = sync_item_in_one_password(secret=secret) + + details.append(secret) + + # Shows all results from each item (secret) in OnePassword + print_result(title=f"Secrets Synchronized in [{VAULT}]", details=details) + + except Exception as e: + console.print_exception(show_locals=True) + console.print(f"[bold red]{e}[/bold red]") + + +def merge_secrets(branch_to_merge: str): + """ + Merge secrets + """ + try: + ticket = branch_to_merge.split("-")[1] + details = merge_secrets_in_one_password(ticket=ticket) + + if len(details) == 0: + console.print(f"There are not secrets to merge with ticket [{ticket}]") + + else: + # Shows all results from each item (secret) in OnePassword + print_result(title=f"Secrets Merged in [{VAULT}]", details=details) + + except Exception as e: + console.print_exception(show_locals=True) + console.print(f"[bold red]{e}[/bold red]") + + +def sync_item_in_one_password(secret: OnePasswordItem) -> OnePasswordItem: + """Use OnePassword cli to create or update item secrets for a vault + + Args: + item (OnePasswordItem): Data item + + Returns: + OnePasswordItem: Data item + """ + + try: + if not secret.exists_local: + raise Exception("File does not exists") + + save_secret = False + diff_with_main = secret.diff(remote_to_local=False) + + if secret.is_working_in_main_branch and diff_with_main: + print_diff(diff_with_main) + save_secret = questionary.confirm( + f"Do you want to overwrite changes directly in main to [{secret.name}]?", + default=False, + ).ask() + + else: + if diff_with_main: + print_diff(diff_with_main) + save_secret = questionary.confirm( + f"Do you want {'update' if secret.exists_remote_ticket else 'create'} item [{secret.name}]?", + default=False, + ).ask() + + if not save_secret: + secret.message = "Cancelled by user" + secret.status = OnePasswordStatus.IGNORED + + else: + secret.message = secret.name + + else: + secret.clean_remote() + secret.message = "There are no changes" + secret.status = OnePasswordStatus.IGNORED + + if save_secret: + secret.save_remote() + + except Exception as e: + raise + secret.message = e + secret.status = OnePasswordStatus.FAILED + + return secret + + +def merge_secrets_in_one_password(ticket: str) -> list: + """ + Merge secrets + """ + + secrets = retrieve_secrets_from_one_password() + secrets_to_merge = filter(lambda x: x["title"].startswith(f"{ticket}-"), secrets) + details = [] + for secret in list(secrets_to_merge): + name_ticket = secret["title"] + name_main = name_ticket.replace(f"{ticket}-", "") + item = OnePasswordItem(path=name_main) + item.status = OnePasswordStatus.MERGED + item.message = name_ticket + + try: + # Get items olds to delete + filtered_to_delete = list( + filter(lambda x: x["title"] == name_main, secrets) + ) + + # Validation if the main secret has changes after the ticket secret was generated + secret_created_at = parser.parse(secret["created_at"]) + if filtered_to_delete: + main_updated_at = parser.parse(filtered_to_delete[0]["updated_at"]) + + if main_updated_at > secret_created_at: + raise Exception( + "Merge aborted. Original secret was changed since the clone was created." 
+ ) + + # Update item in 1Password + secret_detail = subprocess.check_output( + [ + "op", + "item", + "get", + secret["id"], + "--vault", + VAULT, + "--format", + "json", + ] + ) + + secret_detail = json.loads(secret_detail) + path_to_save = "" + content = "" + for field in secret_detail.get("fields"): + if field["label"] == "path_to_save": + path_to_save = field["value"] + elif field["label"] == "content": + content = field["value"] + + new_item = { + "title": name_main, + "category": "SECURE_NOTE", + "fields": [ + { + "type": "STRING", + "label": "path_to_save", + "value": f"{path_to_save}", + }, + {"type": "STRING", "label": "content", "value": f"{content}"}, + ], + } + + # Workaround to Github actions + # https://developer.1password.com/docs/ci-cd/github-actions/#troubleshooting + new_item = json.dumps(new_item) + new_item_secret_name = tempfile.NamedTemporaryFile() + with new_item_secret_name as tmp: + tmp.write(new_item.encode("utf-8")) + + ps = subprocess.Popen(("cat", f"{tmp.name}"), stdout=subprocess.PIPE) + subprocess.check_output( + ( + "op", + "item", + "create", + "--vault", + VAULT, + "--tags", + "cli.py,merged", + ), + stdin=ps.stdout, + ) + ps.wait() + + subprocess.call( + ("op", "item", "delete", secret_detail["id"], "--vault", VAULT) + ) + + # Delete items olds + for item_to_delete in filtered_to_delete: + subprocess.call( + ("op", "item", "delete", item_to_delete["id"], "--vault", VAULT) + ) + + else: + item.status = OnePasswordStatus.CREATED + + except Exception as err: + item.message = err + item.status = OnePasswordStatus.FAILED + + finally: + details.append(item) + + return details + + +@lru_cache(maxsize=None) +def retrieve_secrets_from_one_password() -> list: + """Get list from 1Password + + Returns: + list: List items secrets + """ + secrets = subprocess.check_output( + ["op", "item", "list", "--vault", VAULT, "--format", "json"] + ) + + return json.loads(secrets) + + +@lru_cache(maxsize=None) +def get_mapping_secrets_path() -> list: + """Get list from secrets according to mapping.cfg + + Returns: + list: List items secrets + """ + with open(BASEDIR / MAPPING_SECRETS_PATH, "r") as f: + return f.read().splitlines() + + +def print_diff(lines: list): + if lines: + for line in lines: + console.print(line) + + +def print_result(title: str, details: list): + """Print result in console with format table. + + Args: + details (list): Rows to show + """ + + # Table to shows results + table = Table( + show_header=True, + header_style="bold blue", + title=f"\n[purple]{title}[/purple]", + box=box.SQUARE, + ) + table.add_column("#", style="dim", width=6) + table.add_column("Secret Name", min_width=20) + table.add_column("Path", min_width=20) + table.add_column("Status", min_width=20) + + # Add row in table results + for idx, detail in enumerate(details): + message = f"({detail.message})" if detail.message else "" + if detail.status == OnePasswordStatus.FAILED: + status = f"[red]{detail.status.name}[/red]" + + elif detail.status == OnePasswordStatus.IGNORED: + status = f"[yellow]{detail.status.name}[/yellow]" + + else: + status = f"[green]{detail.status.name}[/green]" + + table.add_row( + str(idx + 1), + detail.absolute_path.name, + str(detail.path_to_save), + f"{status} {message}", + ) + + # Shows all results from each item (secret) in OnePassword + console.print(table) + + +def get_ticket_number_by_git_branch(prompt=True): + """Return a number ticket. + + Raises: + Exception: If branch is not main and user does not provide the ticket number. 
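+    Example (assumed branch naming): a branch called "feature-1234-sync-fix"
+    yields ticket "1234", i.e. the second dash-separated component of the name.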
+ + Returns: + string | None: Ticket number if branch does not main and None if brach is main + """ + ticket = None + current_branch = cmd.output("git rev-parse --abbrev-ref HEAD").replace("\n", "") + if ( + current_branch != "main" + and current_branch != "prev" + and not current_branch.startswith("release") + ): + comps = current_branch.split("-") + ticket = comps[1] if len(comps) > 1 else None + if prompt: + ticket = questionary.text( + message="What's the ticket number?", + default=ticket, + validate=lambda text: ( + True if text.isdigit() else "The ticket must be numeric" + ), + ).ask() + else: + print(f"Ticket detected: {ticket}\n") + if not ticket: + raise Exception("You need a ticket number to continue.") + + return ticket diff --git a/scripts/shell/datacoves_bash_aliases.sh b/scripts/shell/datacoves_bash_aliases.sh new file mode 100644 index 00000000..99073265 --- /dev/null +++ b/scripts/shell/datacoves_bash_aliases.sh @@ -0,0 +1,5 @@ +alias kc=kubectl +alias kcb='kubectl -n dcw-dev123' +alias kcc='kubectl -n core' +alias kcp='kubectl -n prometheus' +alias kckind='kubectl config use-context kind-datacoves-cluster' diff --git a/scripts/shell/docker_manifest.sh b/scripts/shell/docker_manifest.sh new file mode 100755 index 00000000..5687db67 --- /dev/null +++ b/scripts/shell/docker_manifest.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +pass=${1} +repo=${2} +tag=${3} + +if [[ $repo != *"/"* ]]; then + repo="library/$repo" +fi + +basic=$(printf "datacovesprivate:$pass" | base64) +if [[ $repo != *"datacovesprivate/"* ]]; then + token=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:${repo}:pull" \ + | jq -r '.token') +else + token=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:${repo}:pull" \ + -H "Authorization: Basic $basic" | jq -r '.token') + +fi + +digest=$(curl -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \ + -H "Authorization: Bearer $token" -s "https://registry-1.docker.io/v2/${repo}/manifests/${tag}" | jq -r .config.digest) +curl -H "Accept: application/vnd.docker.distribution.manifest.v2+json" \ + -H "Authorization: Bearer $token" \ + -s -L "https://registry-1.docker.io/v2/${repo}/blobs/${digest}" | jq .config \ No newline at end of file diff --git a/scripts/shell/docker_tags.sh b/scripts/shell/docker_tags.sh new file mode 100755 index 00000000..989bfa57 --- /dev/null +++ b/scripts/shell/docker_tags.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +pass=${1} +repo=${2} + +if [[ $repo != *"/"* ]]; then + repo="library/$repo" +fi + +basic=$(printf "datacovesprivate:$pass" | base64) + +tokenUri="https://auth.docker.io/token" +data=("service=registry.docker.io" "scope=repository:${repo}:pull") +if [[ $repo != *"datacovesprivate/"* ]]; then + token="$(curl --silent --get --data-urlencode ${data[0]} --data-urlencode ${data[1]} $tokenUri | jq --raw-output '.token')" +else + token="$(curl -H "Authorization: Basic $basic" --silent --get --data-urlencode ${data[0]} --data-urlencode ${data[1]} $tokenUri | jq --raw-output '.token')" +fi +listUri="https://registry-1.docker.io/v2/${repo}/tags/list" +authz="Authorization: Bearer $token" +result="$(curl --silent --get -H "Accept: application/json" -H "Authorization: Bearer $token" $listUri | jq --raw-output '.')" + +echo $result \ No newline at end of file diff --git a/scripts/shell/helm_repo.sh b/scripts/shell/helm_repo.sh new file mode 100644 index 00000000..5d3322f7 --- /dev/null +++ b/scripts/shell/helm_repo.sh @@ -0,0 +1,4 @@ +curl https://baltocdn.com/helm/signing.asc 
| sudo apt-key add - +sudo apt-get install apt-transport-https --yes +echo "deb https://baltocdn.com/helm/stable/debian/ all main" | sudo tee /etc/apt/sources.list.d/helm-stable-debian.list +sudo apt-get update diff --git a/scripts/shell/helm_utils.sh b/scripts/shell/helm_utils.sh new file mode 100644 index 00000000..dd1d3778 --- /dev/null +++ b/scripts/shell/helm_utils.sh @@ -0,0 +1,52 @@ +# Because helm sucks. + +helm_releases() { + [[ $# -ne 1 ]] && { + echo 'usage: helm_releases ' + return 1 + } + kubectl -n "$1" get secret -l owner=helm -o name | cut -d. -f5 | sort -u +} + +helm_release_json() { + [[ $# -ne 2 ]] && { + echo 'usage: helm_release_json ' + return 1 + } + namespace="$1"; release="$2" + kubectl -n "$namespace" get secret "sh.helm.release.v1.$release.v1" \ + -o jsonpath='{.data.release}' | base64 -D | base64 -D | gunzip +} + +helm_chart_name() { + helm_release_json "$@" | jq -r '.chart.metadata.name' +} + +helm_chart_json() { + helm_release_json "$@" | jq '.chart' +} + +helm_chart_metadata() { + helm_chart_json "$@" | jq '.metadata' +} + +helm_chart_name_with_repo() { + [[ $# -ne 2 ]] && { + echo 'usage: helm_chart_name_with_repo ' + return 1 + } + namespace="$1"; release="$2" + chart_name=$(helm_chart_name "$namespace" "$release"); + chart=$(helm search repo -r "\v[^/]+/$chart_name\v" -o json | jq -r .[].name) + echo "$release: $chart" +} + +helm_charts() { + [[ $# -ne 1 ]] && { + echo 'usage: helm_charts ' + return 1 + } + for rel in $(helm_releases "$1"); do + helm_chart_name_with_repo "$1" "$rel" + done +} diff --git a/scripts/stripe_copy.py b/scripts/stripe_copy.py new file mode 100755 index 00000000..78a778a7 --- /dev/null +++ b/scripts/stripe_copy.py @@ -0,0 +1,128 @@ +""" +This script was copied from https://gist.github.com/mikegogulski/83ce5f6ac0633ca6cac913d0dab4b9eb +""" + +### The best way to start is to use the "Delete all test data" button at https://dashboard.stripe.com/test/developers + +import stripe + +from scripts.stripe_utils import set_api_key + +TEST_DOMAIN = "datacoveslocal.com" + +SKIP_FIELDS = [ + "amount_decimal", + "unit_amount_decimal", + "type", + "object", + "created", + "livemode", + "updated", +] +IGNORE_PRODUCTS = [ + "prod_JNXXXyourproductID", +] + + +def clear_stripe_things(thing: tuple) -> None: + """ + Clear out test products/prices + """ + print("Clearing", thing[0]) + for p in getattr(stripe, thing[0]).list(): + print("Clearing", p.get("id"), "-", p.get("name")) + if p.get("product") in IGNORE_PRODUCTS: + print("Ignoring") + continue + if not p.get("active"): + print("Inactive, skipping") + continue + if p.get("livemode"): + print("LIVE MODE THING! 
Skipping") + continue + try: + getattr(stripe, thing[0]).modify(p.get("id"), active=False) + except Exception as e: + print("Exception modifying for", p) + print(e) + pass + try: + p.delete() + except Exception as e: + print("Exception deleting for", p.get("id")) + print(e) + continue + + +def upload_products(products) -> None: + """ + Copy production products to test, preserving IDs + """ + print("Uploading products") + up = list() + for p in products[1].get("data"): + print("Queueing", p.get("id"), "-", p.get("name")) + if p.get("product") in IGNORE_PRODUCTS: + print("Ignoring") + continue + up.append({k: v for k, v in p.items() if k not in SKIP_FIELDS}) + for p in up: + try: + del p["default_price"] + print("Uploading", p.get("id"), "-", p.get("name")) + print(up) + stripe.Product.create(**p) + except Exception as e: + print("EXCEPTION creating", p) + print("EXCEPTION:", e) + continue + + +def upload_prices(products, prices) -> None: + """ + Upload prices to Stripe, preserving product ID correspondences + """ + skips = SKIP_FIELDS + ["id", "unit_amount_decimal", "flat_amount_decimal"] + for p in products[1]: + print("Uploading for", p.get("id"), "-", p.get("name")) + if not p.get("active"): + print("Inactive product, skipping") + continue + + for prod_price in prices[1]: + if prod_price.get("product") == p.get("id"): + # remove the "flat_amount_decimal" and "unit_amount_decimal" keys from the tiers + for tier in prod_price.get("tiers", []): + del tier["flat_amount_decimal"] + del tier["unit_amount_decimal"] + tier["up_to"] = ( + "inf" if tier.get("up_to") is None else tier.get("up_to") + ) + print("prod_price", prod_price) + test_price = {k: v for k, v in prod_price.items() if k not in skips} + print("test_price", test_price) + try: + stripe.Price.create(**test_price) + except Exception as e: + print("EXCEPTION creating price", p) + print("EXCEPTION:", e) + continue + + +def copy_to_test(cluster_domain): + """ + Copies products and prices from a cluster stripe account into + datacoveslocal.com associated stripe test mode account + """ + set_api_key(cluster_domain) + prodproducts = ("Product", stripe.Product.list(active=True, limit=100)) + prodprices = ( + "Price", + stripe.Price.list(active=True, limit=100, expand=["data.tiers"]), + ) + + set_api_key(TEST_DOMAIN) + clear_stripe_things(prodprices) + clear_stripe_things(prodproducts) + upload_products(prodproducts) + upload_prices(prodproducts, prodprices) diff --git a/scripts/stripe_utils.py b/scripts/stripe_utils.py new file mode 100644 index 00000000..0fc43921 --- /dev/null +++ b/scripts/stripe_utils.py @@ -0,0 +1,137 @@ +from collections import defaultdict +from pathlib import Path + +import stripe + +from lib.config_files import load_file, write_yaml + +DEFAULT_PLAN_VARIANT = "standard" + + +def set_api_key(cluster_domain): + path = Path("config") / cluster_domain / "secrets" / "core-api.env" + stripe.api_key = load_file(path)["STRIPE_API_KEY"] + + +def download_pricing_model(cluster_domain): + """ + Downloads pricing model from Stripe + """ + set_api_key(cluster_domain) + path = Path("config") / cluster_domain / "pricing.yaml" + current_model = load_file(path) + model = pricing_model() + merge_current_into_new( + model["products"], + current_model.get("products", {}), + {"tally_name": "", "service_name": ""}, + ) + merge_current_into_new( + model["plans"], + current_model.get("plans", {}), + {"environment_quotas": {}, "trial_period_days": 0}, + ) + write_yaml(path, model) + + +def merge_current_into_new(news, currents, fields): + 
for k, new in news.items(): + current = currents.get(k, {}) + for f, default in fields.items(): + new[f] = current.get(f, default) + + +def pricing_model(cluster_domain=None): + if cluster_domain: + set_api_key(cluster_domain) + products = get_actives(stripe.Product) + prices = get_actives(stripe.Price) + + prices_by_product = defaultdict(list) + for price in prices.values(): + if price.product in products: + prices_by_product[price.product].append(price) + + plans = defaultdict(dict) + prices_by_plan = defaultdict(list) + for p in products.values(): + plan_meta = p.metadata.get("plans", p.metadata.get("plan", "")) + for plan_slug in plan_meta.split(","): + if not plan_slug: + continue + plan = plans[plan_slug] + kind, billing_period = plan_slug.rsplit("-", 1) + plan["kind"] = kind + plan["billing_period"] = billing_period + plan["prices"] = prices_by_plan[plan_slug] + + for price in prices_by_product[p.id]: + price_data = price.to_dict_recursive() + price_data.pop("object") + assert price_data.pop("active") + + plan["prices"].append(price_data) + + seat_products = [] + + for product_id, product in products.items(): + charges_per_seat = product.unit_label == "seat" + if charges_per_seat: + seat_products.append(product_id) + products[product_id] = { + "id": product_id, + "name": product.name, + "description": product.description or "", + "stripe_data": product.to_dict_recursive(), + "charges_per_seat": charges_per_seat, + } + + for plan in plans.values(): + plan["variants"] = plan_prices_to_variants( + plan["prices"], seat_products, DEFAULT_PLAN_VARIANT + ) + del plan["prices"] + + return { + "products": products, + "plans": dict(plans), + } + + +def get_actives(resource_class): + it = resource_class.list().auto_paging_iter() + return {p.id: p for p in it if p.active} + + +def plan_prices_to_variants(prices, seat_products, default_variant): + """ + Returns a list of 'variants' + [ + { + 'standard': { + 'default': True, + 'items': [] + } + } + ] + """ + data = [] + # organize prices by nickname: standard, pro (different, customized amounts) + prices_dict = defaultdict(list) + for price in prices: + variant = price.get("nickname") or default_variant + prices_dict[variant].append(price) + + for variant in prices_dict: + seat_items = [] + other_items = [] + for price in prices_dict[variant]: + item = {"price": price} + if price["product"] in seat_products: + seat_items.append(item) + else: + other_items.append(item) + # Seat product prices go first as stripe's checkout page features the first item + items = seat_items + other_items + data.append({variant: {"items": items, "default": variant == default_variant}}) + return data diff --git a/scripts/translators.py b/scripts/translators.py new file mode 100644 index 00000000..273dce3d --- /dev/null +++ b/scripts/translators.py @@ -0,0 +1,23 @@ +def dockerfile_to_python(filename): + with open(filename, "r") as f: + lines = (l.strip() for l in f.readlines()) + lines = [l for l in lines if l] + i = 0 + while i < len(lines): + line = lines[i] + if line.startswith("#"): + print(" " + line) + i += 1 + continue + directive = [] + while line.endswith("\\"): + directive.append(line[:-1]) + i += 1 + line = lines[i] + directive.append(line) + op, directive[0] = directive[0].split(maxsplit=1) + args = map(repr, directive) + args = map(lambda x: x if len(x) + 8 < 120 else f"{x} #noqa E501", args) + argstr = "\n ".join(args) + print(f" d.{op}({argstr})") + i += 1 diff --git a/scripts/utils/airbyte_config/sample.csv b/scripts/utils/airbyte_config/sample.csv new 
file mode 100644 index 00000000..dd42ea5a --- /dev/null +++ b/scripts/utils/airbyte_config/sample.csv @@ -0,0 +1,2 @@ +COUNTRY_ISO_CODE|COUNTRY_NAME|REGION|ID|OWNERID|ISDELETED|NAME|CURRENCYISOCODE|RECORDTYPEID|CREATEDDATE|CREATEDBYID|LASTMODIFIEDDATE|LASTMODIFIEDBYID|SYSTEMMODSTAMP|LASTACTIVITYDATE|MAYEDIT|ISLOCKED|LASTVIEWEDDATE|LASTREFERENCEDDATE|CONNECTIONRECEIVEDID|CONNECTIONSENTID|CONSUMER_SITE__C|PRODUCT_INFO__C|THERAPEUTIC_CLASS_VOD__C|PARENT_PRODUCT_VOD__C|THERAPEUTIC_AREA_VOD__C|PRODUCT_TYPE_VOD__C|REQUIRE_KEY_MESSAGE_VOD__C|COST_VOD__C|EXTERNAL_ID_VOD__C|MANUFACTURER_VOD__C|COMPANY_PRODUCT_VOD__C|CONTROLLED_SUBSTANCE_VOD__C|DESCRIPTION_VOD__C|SAMPLE_QUANTITY_PICKLIST_VOD__C|DISPLAY_ORDER_VOD__C|NO_METRICS_VOD__C|DISTRIBUTOR_VOD__C|SAMPLE_QUANTITY_BOUND_VOD__C|SAMPLE_U_M_VOD__C|NO_DETAILS_VOD__C|QUANTITY_PER_CASE_VOD__C|SCHEDULE_VOD__C|RESTRICTED_VOD__C|PRICING_RULE_QUANTITY_BOUND_VOD__C|NO_PROMO_ITEMS_VOD__C|USER_ALIGNED_VOD__C|RESTRICTED_STATES_VOD__C|SORT_CODE_VOD__C|NO_CYCLE_PLANS_VOD__C|INVENTORY_ORDER_UOM_VOD__C|INVENTORY_QUANTITY_PER_CASE_VOD__C|JJ_PRODUCT_COUNTRY__C|VEXTERNAL_ID_VOD__C|JJ_PRODUCT_REGION__C|JJ_ALLOW_SEGMENT__C|JJ_STRENGTH_OF_THE_PRODUCT__C|JJ_CONDITIONING__C|PRICING_BOUND_VOD__C|DM_EXTERNAL_ID_C__C|JJ_DATALOADID__C|JJ_DM_EXTERNAL_ID__C|JJ_ACTIVE__C|JJ_AVAILABLE_FOR_CSC__C|JJ_DETAIL_SUB_TYPE__C|JJ_FORMULATION_PARENT__C|JJ_FORMULATION__C|JJ_GENERIC_NAME_PARENT__C|JJ_GENERIC_NAME__C|JJ_INDICATION_2__C|JJ_INDICATION__C|JJ_INVESTIGATIONAL_NAME_PARENT__C|JJ_INVESTIGATIONAL_NAME__C|JJ_JANSSEN_LIFE_CYCLE_PARENT__C|JJ_JANSSEN_LIFE_CYCLE__C|JJ_PRICE_OF_PROMOTIONAL_ITEM__C|JJ_PRODUCT_ATC_CODE_PARENT__C|JJ_PRODUCT_ATC_CODE__C|JJ_PRODUCT_END_DATE__C|JJ_PRODUCT_SKU_CODE__C|JJ_PRODUCT_START_DATE__C|JJ_REASON_FOR_PRODUCT_END_PARENT__C|JJ_REASON_FOR_PRODUCT_END__C|JJ_RELEVANT_FOR_EVENTS__C|JJ_THERAPEUTIC_AREA_2__C|JJ_TRADENAME_PARENT__C|JJ_TRADENAME__C|PRODUCT_SKU_CODE_PARENT__C|PRODUCT_IDENTIFIER_VOD__C|PRODUCT_VALUE_VOD__C|JJ_SAP_SKU_CODE__C|CREATE_LOT_CATALOG_VOD__C|INVENTORY_MONITORING_VOD__C|MASTER_ALIGN_ID_VOD__C|PRODUCT_THUMBNAIL_VOD__C|BUNDLE_PACK_VOD__C|COLD_CHAIN_VOD__C|ZVOD_CUSTOM_TEXT_VOD__C|REQUIRE_DISCUSSION_VOD__C|FIXED_UM_VOD__C|STATE_DISTRIBUTOR_CATEGORY_VOD__C|HEIGHT_VOD__C|WIDTH_VOD__C|JJ_COMBO_PRODUCT__C +"OM"|"Oman"|"GCC"|"a001p000012TuxGAAS"|"005U0000001cKOvIAM"|"0"|"Selexipag-OM°"|"SAR"|"012U0000000QEzcIAG"|"20190123 13:38:36"|"005U0000001cKOvIAM"|"20191204 15:05:57"|"005U0000004PKM6IAO"|"20191204 15:05:57"|""|"1"|"0"|""|""|""|""|""|""|""|"a001p000012TuxBAAS"|"Pulmonary arterial hypertension"|"Detail"|"0"|""|"Selexipag-OM°Oman"|""|"1"|"0"|""|""|""|"1"|""|"0"|""|"0"|""|""|"0"|"0"|"TRUE"|"0"|""|""|"0"|""|""|"Oman"|"Selexipag-OM°::Oman"|"GCC"|"0"|""|""|"0"|""|""|""|"1"|"0"|"Call"|""|""|"Selexipag"|""|""|"Pulmonary arterial hypertension"|""|""|"Priority"|""|""|""|""|""|""|"20191201 00:00:00"|""|""|"0"|""|"."|""|""|""|""|""|"0"|"0"|""|""|"0"|"0"|"0"|""|"0"|""|""|""|"" diff --git a/scripts/utils/airbyte_config/schema_from_csv.py b/scripts/utils/airbyte_config/schema_from_csv.py new file mode 100755 index 00000000..c951e74e --- /dev/null +++ b/scripts/utils/airbyte_config/schema_from_csv.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 +import csv +import json +import sys + +if __name__ == "__main__": + program_name, *all_args = sys.argv + + if len(all_args) == 0: + path = input("Please enter path to csv: ") + else: + path = all_args[0] + + delimiter = input("Specify delimiter char (default is ','): ") or "," + quote = input("Specify quote char (default is 
'\"'): ") or '"' + + with open(path, newline="") as csvfile: + reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quote) + for row in reader: + schema = {col: "string" for col in row} + print(json.dumps(schema)) + break diff --git a/scripts/utils/delete_error_pods.sh b/scripts/utils/delete_error_pods.sh new file mode 100755 index 00000000..56792baa --- /dev/null +++ b/scripts/utils/delete_error_pods.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +# Set the namespace where your pods reside +NAMESPACE="dcw-gay725" + +# Get the list of pods with the status "Error" +PODS_ERROR=$(kubectl get pods -n "$NAMESPACE" --field-selector=status.phase=Failed -o jsonpath='{.items[*].metadata.name}') + +# Check if there are any pods with the status "Error" +if [ -z "$PODS_ERROR" ]; then + echo "No pods with the status 'Error' found in namespace '$NAMESPACE'. Nothing to delete." + exit 0 +fi + +# Loop through the list of pods with the status "Error" and delete them +for POD_NAME in $PODS_ERROR; do + kubectl delete pod "$POD_NAME" -n "$NAMESPACE" +done + +echo "Deleted all pods with the status 'Error' in namespace '$NAMESPACE'." diff --git a/scripts/utils/force_push_images.py b/scripts/utils/force_push_images.py new file mode 100755 index 00000000..7e39ec15 --- /dev/null +++ b/scripts/utils/force_push_images.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python3 + +# Example: ./force_push_images.py 2.3.202405291520 kenvue.jfrog.io/dco-docker databricks,bigquery,redshift +import subprocess +import sys +from pathlib import Path + +import yaml + +extra_images = [ + "airbyte/source-s3:4.1.4", + "airbyte/source-file:0.3.15", + "airbyte/source-snowflake:0.2.2", + "airbyte/source-postgres:3.2.20", + "airbyte/source-postgres:3.3.8", + "airbyte/source-mssql:3.0.0", + "airbyte/source-redshift:0.4.0", + "airbyte/source-salesforce:2.1.5", + "airbyte/source-azure-blob-storage:0.2.2", + "airbyte/source-azure-table:0.1.3", + "airbyte/source-bigquery:0.3.0", + "airbyte/source-dv-360:0.1.0", + "airbyte/source-jira:0.10.2", + "airbyte/source-shopify:1.1.4", + "airbyte/source-snapchat-marketing:0.3.0", + "airbyte/source-oracle:0.4.0", + "airbyte/destination-snowflake:3.4.9", + "airbyte/destination-postgres:0.4.0", + "airbyte/destination-s3:0.5.4", + "airbyte/destination-redshift:0.6.9", + "airbyte/normalization-redshift:0.4.3", + "airbyte/normalization-snowflake:0.4.3", + "airbyte/normalization-mssql:0.4.3", + "airbyte/source-bigquery:0.4.1", + "airbyte/source-sftp:0.2.1", +] + + +def _get_images(release_name, exclude_images=[]) -> list: + release_file = Path("../../releases") / (release_name + ".yaml") + images_secction = [ + "airbyte_images", + "airflow_images", + "ci_images", + "core_images", + "images", + "observability_images", + ] + images = [] + with open(release_file) as f: + content = yaml.safe_load(f) + for secction in images_secction: + items = content.get(secction) + if isinstance(items, list): + for image in content.get(secction): + exclude = list(filter(lambda x: x in image, exclude_images)) + if not exclude: + images.append(image) + elif isinstance(items, dict): + for image, tag in content.get(secction).items(): + image = f"{image}:{tag}" + exclude = list(filter(lambda x: x in image, exclude_images)) + if not exclude: + images.append(image) + + images.extend(extra_images) + return images + + +def _pull_images(images): + for image in images: + cmd = f"docker pull {image}" + subprocess.run(cmd.split()) + + +def _push_images(images, repo): + for image in images: + cmd = f"docker push {repo}/{image}" + subprocess.run(cmd.split()) 
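+
+# Net effect per image, as a sketch (source registry and target repo taken from
+# the usage example at the top of this file; any image from the release list or
+# extra_images could stand in here):
+#   docker pull airbyte/source-s3:4.1.4
+#   docker tag airbyte/source-s3:4.1.4 kenvue.jfrog.io/dco-docker/airbyte/source-s3:4.1.4
+#   docker push kenvue.jfrog.io/dco-docker/airbyte/source-s3:4.1.4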
+ + +def _retag_images(images, target_repo): + for image in images: + cmd = f"docker tag {image} {target_repo}/{image}" + subprocess.run(cmd.split()) + + +if __name__ == "__main__": + _, release_name, target_repo, exclude_images = sys.argv + exclude_images = exclude_images.split(",") + images = _get_images(release_name=release_name, exclude_images=exclude_images) + _pull_images(images=images) + _retag_images(images=images, target_repo=target_repo) + _push_images(images=images, repo=target_repo) diff --git a/scripts/utils/unused_secrets.sh b/scripts/utils/unused_secrets.sh new file mode 100755 index 00000000..16d1fbb9 --- /dev/null +++ b/scripts/utils/unused_secrets.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +read -p "Namespace? " namespace + +depSecrets=$(kubectl -n "$namespace" get deploy -o jsonpath='{.items[*].spec.template.spec.volumes[*].secret.secretName}' | xargs -n1 | grep user-secrets) + +# Get the secrets to be deleted +secretsToDelete=$(comm -13 \ + <(echo "$depSecrets" | sort | uniq) \ + <(kubectl -n "$namespace" get secrets -o jsonpath='{.items[*].metadata.name}' | xargs -n1 | grep user-secrets | sort | uniq)) + +# Display the secrets to be deleted +echo "Secrets to be deleted in namespace $namespace:" +echo "$secretsToDelete" + +# Ask for confirmation +read -p "Do you want to delete these secrets? (y/n): " confirm + +if [ "$confirm" == "y" ]; then + # Perform the deletion + echo "$secretsToDelete" | xargs -I{} kubectl -n "$namespace" delete secret "{}" + + echo "Secrets deleted successfully." +else + echo "Deletion canceled." +fi diff --git a/scripts/versions.py b/scripts/versions.py new file mode 100644 index 00000000..0a936722 --- /dev/null +++ b/scripts/versions.py @@ -0,0 +1,523 @@ +import base64 +from os import environ, listdir +from pathlib import Path + +import questionary +import requests + +from lib import cmd +from lib.config_files import ( + load_file, + load_yaml, + replace_in_file, + secret_value_from_yaml, + write_file, + write_yaml, +) +from lib.utils import force_ipv4 + +from .docker_images import ( + latest_version_tags, + public_repos_from_paths, + repos_from_paths, +) +from .github import Releaser +from .releases import all_releases, generate_release_name + +# TODO: airbyte, airflow, and superset runtime images could be dynamically determined given a helm chart version +# see https://github.com/helm-lab/helm-images/blob/master/images.sh +AIRBYTE_CHART = { + "repo": "https://airbytehq.github.io/helm-charts", + "repo_name": "airbyte", + "chart": "airbyte/airbyte", + "version": "1.6.0", + "app_version": "1.6.0", +} +AIRBYTE_IMAGES = [ + "busybox:1.35", + "alpine/socat:1.7.4.1-r1", + "curlimages/curl:8.1.1", + "airbyte/bootloader:1.6.0", + "bitnami/kubectl:1.28.9", + "airbyte/server:1.6.0", + "airbyte/webapp:1.6.0", + "airbyte/worker:1.6.0", + "airbyte/cron:1.6.0", + "airbyte/mc:latest", + "airbyte/connector-builder-server:1.6.0", + "airbyte/connector-sidecar:1.6.0", + "airbyte/container-orchestrator:1.6.0", + "airbyte/workload-api-server:1.6.0", + "airbyte/workload-init-container:1.6.0", + "airbyte/workload-launcher:1.6.0", + "airbyte/async-profiler:1.6.0", + "airbyte/source-declarative-manifest:6.45.7", + "airbyte/workload-api-server:1.6.0", + "temporalio/auto-setup:1.26", + # this image used to be built by datacoves, kept just for compatibility with older helm chart + "datacovesprivate/airbyte-temporal:1.4.202303160208-bce12507", + # this image used to be built by datacoves, introducing custom docker registry + # via the env var 
JOB_KUBE_MAIN_CONTAINER_IMAGE_REGISTRY + # https://github.com/datacoves/airbyte-platform/commit/68120f4578bd1291e2b2e06a1511a4b2a4869024 + # kept just for compatibility with older helm chart + "datacovesprivate/airbyte-worker:0.50.25-patched", +] + +# Forked just to support hostAliases, rollback to official once PR is approved +AIRFLOW_CHART = { + "repo": "https://airflow.apache.org", + "repo_name": "apache-airflow", + "chart": "apache-airflow/airflow", + "version": "1.15.0", + "app_version": "2.10.3", +} +MINIO_CHART = { + "repo": "https://charts.bitnami.com/bitnami", + "repo_name": "bitnami-minio", + "chart": "bitnami-minio/minio", + "version": "11.x.x", +} +AIRFLOW_IMAGES = [ + "busybox:1.36", + # Common tag prefixes must be ordered shortest first. + "apache/airflow:airflow-pgbouncer-2024.01.19-1.21.0", + "apache/airflow:airflow-pgbouncer-exporter-2024.06.18-0.17.0", + "quay.io/prometheus/statsd-exporter:v0.27.2", + "amazon/aws-cli:2.18.7", +] + +SUPERSET_CHART = { + "repo": "https://apache.github.io/superset", + "repo_name": "apache-superset", + "chart": "apache-superset/superset", + "version": "0.10.6", + "app_version": "2.1.0", +} +SUPERSET_IMAGES = ["bitnami/redis:7.0.10-debian-11-r4", "apache/superset:dockerize"] + +ELASTIC_CHART = { + "repo": "https://helm.elastic.co", + "repo_name": "elastic", + "chart": "elastic/elasticsearch", + "version": "7.17.3", + "app_version": "7.17.3", +} +ELASTIC_IMAGES = ["docker.elastic.co/elasticsearch/elasticsearch:7.17.3"] + +NEO4J_CHART = { + "repo": "https://helm.neo4j.com/neo4j", + "repo_name": "neo4j", + "chart": "neo4j/neo4j", + "version": "5.11.0", + "app_version": "5.11.0", +} +NEO4J_IMAGES = ["neo4j:5.11.0", "bitnami/kubectl:1.27"] +# neo4j:4.4.9-community + + +POSTGRESQL_CHART = { + "repo": "https://charts.bitnami.com/bitnami", + "repo_name": "bitnami", + "chart": "bitnami/postgresql", + "version": "11.6.26", + "app_version": "14.4.0", +} +POSTGRESQL_IMAGES = ["bitnami/postgresql:14.4.0-debian-11-r23"] + +KAFKA_CHART = { + "repo": "https://charts.bitnami.com/bitnami", + "repo_name": "bitnami", + "chart": "bitnami/kafka", + "version": "26.11.2", + "app_version": "3.6.1", +} +KAFKA_IMAGES = [ + "bitnami/kafka:3.6.1-debian-12-r12", + "bitnami/zookeeper:3.9.2-debian-12-r8", +] + +DATAHUB_CHART = { + "repo": "https://helm.datahubproject.io", + "repo_name": "datahub", + "chart": "datahub/datahub", + "version": "0.4.36", + "app_version": "0.14.1", +} +DATAHUB_IMAGES = [ + "acryldata/datahub-gms:v0.14.1", + "acryldata/datahub-frontend-react:v0.14.1", + "acryldata/datahub-actions:v0.1.1", + "acryldata/datahub-elasticsearch-setup:v0.14.1", + "acryldata/datahub-kafka-setup:v0.14.1", + "acryldata/datahub-postgres-setup:v0.14.1", + "acryldata/datahub-upgrade:v0.14.1", + "acryldata/datahub-mae-consumer:v0.14.1", + "acryldata/datahub-mce-consumer:v0.14.1", +] + +PROMTAIL_CHART = { + "repo": "https://grafana.github.io/helm-charts", + "repo_name": "grafana", + "chart": "grafana/promtail", + "version": "6.15.5", + "app_version": "2.9.3", +} + +OBSERVABILITY_IMAGES = [ + "quay.io/prometheus/alertmanager:v0.27.0", + "quay.io/prometheus/prometheus:v2.54.1", + "library/busybox:1.31.1", + "kiwigrid/k8s-sidecar:1.27.4", + "quay.io/prometheus-operator/prometheus-config-reloader:v0.77.1", + "ghcr.io/jimmidyson/configmap-reload:v0.12.0", + "grafana/grafana:11.2.2", + "grafana/loki:2.9.10", + "grafana/promtail:2.9.10", + "registry.k8s.io/kube-state-metrics/kube-state-metrics:v2.13.0", + 
"registry.k8s.io/ingress-nginx/kube-webhook-certgen:v20221220-controller-v1.5.1-58-g787ea74b6", + "quay.io/prometheus/node-exporter:v1.8.2", + "quay.io/prometheus-operator/prometheus-operator:v0.77.1", + "bitnami/postgresql:15.2.0-debian-11-r26", + "nginxinc/nginx-unprivileged:1.27.1-alpine", + "grafana/agent:v0.42.0", + "bats/bats:v1.4.1", + "grafana/mimir:2.13.0", + "quay.io/minio/minio:RELEASE.2023-09-30T07-02-29Z", + "ghcr.io/blind-oracle/cortex-tenant:1.13.0", + "grafana/rollout-operator:v0.17.0", + "docker.io/nginxinc/nginx-unprivileged:1.25-alpine", + "quay.io/minio/mc:RELEASE.2023-09-29T16-41-22Z", +] + +CORE_IMAGES = [ + "gcr.io/kaniko-project/executor:v1.9.2-debug", + "bitnami/minio:2022.6.25-debian-11-r0", + "registry.k8s.io/git-sync/git-sync:v4.1.0", + "pomerium/pomerium:v0.15.0", + "bitnami/redis:7.0.11-debian-11-r0", + "bitnami/redis-exporter:1.48.0-debian-11-r5", + "bitnami/postgresql:15.3.0-debian-11-r17", + "registry.k8s.io/pause:3.9", + "gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0", +] + +DEPRECATED = { + "deployments": ["rabbitmq", "worker"], + "charts": ["rabbitmq"], + "hpas": ["worker"], +} + +PROFILE_FLAGS = { + "global": {}, + "dbt-snowflake": {}, + "dbt-redshift": {}, + "dbt-databricks": {}, + "dbt-bigquery": {}, +} + +# ------------- These images are just for reference, not needed on prod environments. (SC=Self Contained) -------------- +AIRBYTE_SC_IMAGES = [ + # https://github.com/bitnami/charts/blob/2710baea1eb548209e9f97627f632987be5f5daf/bitnami/postgresql/values.yaml + "airbyte/db:1.6.0", + # https://github.com/bitnami/charts/blob/77daeb13bdcce999ce5852c2c62cc803ec9e7d9f/bitnami/minio/values.yaml + "minio/minio:RELEASE.2023-11-20T22-40-07Z", +] + +AIRFLOW_SC_IMAGES = [ + "bitnami/postgresql:11.12.0-debian-10-r44", +] + +SUPERSET_SC_IMAGES = [ + # https://github.com/bitnami/charts/blob/52a2c99a89018659f18b774585bb10954625a215/bitnami/postgresql/values.yaml + "bitnami/postgresql:11.10.0-debian-10-r24", + # https://github.com/bitnami/charts/blob/a2e8beac0d1ef76dd64bbf67e82e92c1e3281970/bitnami/redis/values.yaml + "bitnami/redis:6.2.6-debian-10-r120", +] +# ------------------------------------------------------------------------------------------------------ + +PACKAGE_VERSIONS = { + "airbyte": {"version_prefix": "version.airbyte", "chart": AIRBYTE_CHART}, + "airflow": { + "version_prefix": "version.airflow", + "providers_prefix": "provider.airflow.", + "chart": AIRFLOW_CHART, + }, + "code_server": { + "version_prefix": "version.code-server", + "libraries_prefix": "library.code-server.", + "extensions_prefix": "extension.code-server.", + }, + "dbt": {"version_prefix": "library.code-server.dbt-core"}, + "superset": {"version_prefix": "version.superset", "chart": SUPERSET_CHART}, +} + + +def generate_release(): + name, timestamp, ticket = generate_release_name() + is_prerelease = ticket is not None + if is_prerelease: + print("Generating a pre-release.\n") + version = str(load_yaml(".version.yml")["version"]) + latest_private_tags = latest_version_tags(version, repos_from_paths(), name) + latest_public_tags = latest_version_tags(version, public_repos_from_paths(), name) + images = latest_private_tags.copy() + images.update(latest_public_tags) + release = extract_tools_version(images, ignore_mismatchs=name.startswith("pre")) + commit = cmd.output("git rev-parse HEAD").strip() + release.update( + { + "commit": commit, + "released_at": timestamp, + "images": latest_private_tags, + "ci_images": latest_public_tags, + "airbyte_chart": AIRBYTE_CHART, + "airbyte_images": 
AIRBYTE_IMAGES, + "airflow_chart": AIRFLOW_CHART, + "minio_chart": MINIO_CHART, + "airflow_images": AIRFLOW_IMAGES, + "superset_chart": SUPERSET_CHART, + "superset_images": SUPERSET_IMAGES, + "elastic_chart": ELASTIC_CHART, + "elastic_images": ELASTIC_IMAGES, + "neo4j_chart": NEO4J_CHART, + "neo4j_images": NEO4J_IMAGES, + "postgresql_chart": POSTGRESQL_CHART, + "postgresql_images": POSTGRESQL_IMAGES, + "kafka_chart": KAFKA_CHART, + "kafka_images": KAFKA_IMAGES, + "datahub_chart": DATAHUB_CHART, + "datahub_images": DATAHUB_IMAGES, + "promtail_chart": PROMTAIL_CHART, + "observability_images": OBSERVABILITY_IMAGES, + "core_images": CORE_IMAGES, + "deprecated": DEPRECATED, + "name": name, + # It could also include stable, beta, or a customer alias + "channels": ["edge"], + "profile_flags": PROFILE_FLAGS, + } + ) + folder = Path("releases") + folder.mkdir(parents=True, exist_ok=True) + filename = name + ".yaml" + + write_yaml(folder / filename, release) + upload = True + + if is_prerelease: + upload = questionary.confirm("Upload pre-release?").ask() + + if upload: + Releaser().create_release(name, commit, is_prerelease=is_prerelease) + print( + f"Release {name} successfully generated and uploaded to GitHub. Please review it and publish it." + ) + else: + print(f"Release {name} successfully generated. It was not uploaded to GitHub.") + + return name + + +def upload_releases(): + """ + Creates one release per file in the /releases folder. This should be run once. + """ + releaser = Releaser() + releases_dir = Path("releases") + releases = [f for f in listdir(releases_dir) if Path(releases_dir / f).is_file()] + for release_name in sorted(releases): + print(f"Creating GitHub release {release_name}.") + release_yaml = load_yaml(releases_dir / release_name) + releaser.create_release( + release_yaml["name"], release_yaml["commit"], release_yaml.get("notes", "") + ) + + +def combined_release_notes(cluster_domain: str = None, from_release: str = None): + if cluster_domain: + cluster_params = load_file(f"config/{cluster_domain}/cluster-params.yaml") + from_release = cluster_params.get("release") + + combined = "" + for release_name in all_releases(): + if release_name > from_release: + release = load_yaml(f"releases/{release_name}.yaml") + release_notes = release.get("notes") + if release_notes: + release_notes = release_notes.replace("#", "##").replace("\n", "\n\n") + combined += f"# Release {release_name}\n\n{release_notes}\n\n" + path = "combined.md" + write_file(path, combined.replace("\n\n\n", "\n\n").replace("\n\n\n", "\n\n")) + print(f"File {path} created.") + + +def version_dependencies_summary(images, ignore_mismatchs=False): + versioning_prefixes = [ + "com.datacoves.version.", + "com.datacoves.library.", + "com.datacoves.extension.", + "com.datacoves.provider.", + ] + summary = {} + + pswd = environ.get("DOCKER_PASSWORD") or secret_value_from_yaml( + Path("secrets/cli.secret.yaml"), "docker_password" + ) + + for image, tag in images.items(): + print(f"Getting labels for {image}:{tag}...") + + labels = get_docker_manifest(pswd, image, tag).get("Labels") + + if labels: + for label, version in labels.items(): + if not label.startswith(tuple(versioning_prefixes)): + continue + + label = label.replace("com.datacoves.", "") + + if ( + not ignore_mismatchs + and label in summary + and version != summary[label] + ): + # E.g. 
all airbyte images must be from the same airbyte + # version, so we label them all with the same + # com.datacoves.version.airbyte + raise Exception( + f"LABEL {versioning_prefixes}{label} must have the " + "same value if specified in multiple Dockerfiles." + ) + + summary[label] = version + + return summary + + +# 2024.11 - Requests to the dockerhub api were hanging while the same request +# made with curl were not. Isse fixed by using ipv4 instead of ipv6. +# https://stackoverflow.com/questions/52885446/python-requests-module-hangs-on-socket-connection-but-curl-works +@force_ipv4 +def get_docker_manifest(pswd, repo, tag): + """Returns docker manifest using either docker or OCI standard""" + is_private = "datacovesprivate" in repo + + if "/" not in repo: + repo = "library/" + repo + + headers = None + if is_private: + # If image is stored on a private registry, add auth token + token = base64.b64encode(f"datacovesprivate:{pswd}".encode()) + headers = {"Authorization": f"Basic {token.decode()}"} + + # Getting session token + response = requests.get( + f"https://auth.docker.io/token?service=registry.docker.io&scope=repository:{repo}:pull", + headers=headers, + ) + response.raise_for_status() + token = response.json()["token"] + + # Trying with docker standard response + response = requests.get( + f"https://registry-1.docker.io/v2/{repo}/manifests/{tag}", + headers={ + "Authorization": f"Bearer {token}", + "Accept": "application/vnd.docker.distribution.manifest.v2+json", + }, + ) + blob_digest = None + if response.status_code == 200: + blob_digest = response.json().get("config", {}).get("digest") + else: + # If not 200, it probably means the manifest was uploaded using OCI format + response = requests.get( + f"https://registry-1.docker.io/v2/{repo}/manifests/{tag}", + headers={ + "Authorization": f"Bearer {token}", + "Accept": "application/vnd.oci.image.index.v1+json", + }, + ) + response.raise_for_status() + digest = None + if response.status_code == 200: + # In OCI, the resposne could include multiple manifests, one per platform + manifests = response.json().get("manifests") + + for manifest in manifests: + if manifest["platform"]["os"] == "linux": + digest = manifest["digest"] + break + if digest: + # If manifest digest was retrieved, get the details to extract the blob digest + response = requests.get( + f"https://registry-1.docker.io/v2/{repo}/manifests/{digest}", + headers={ + "Authorization": f"Bearer {token}", + "Accept": "application/vnd.oci.image.manifest.v1+json", + }, + ) + response.raise_for_status() + blob_digest = response.json()["config"]["digest"] + + if not blob_digest: + raise Exception(f"No blob digest found for {repo}") + + response = requests.get( + f"https://registry-1.docker.io/v2/{repo}/blobs/{blob_digest}", + headers={ + "Authorization": f"Bearer {token}", + "Accept": "application/vnd.oci.image.index.v1+json", + }, + ) + response.raise_for_status() + details = response.json() + return details["config"] + + +def extract_tools_version(images, ignore_mismatchs=False): + packages_data = version_dependencies_summary( + images, ignore_mismatchs=ignore_mismatchs + ) + + versions = {} + for package, metadata in PACKAGE_VERSIONS.items(): + # If metadata points to a chart and it contains app_version, use it + chart_app_version = metadata.get("chart", {}).get("app_version") + if chart_app_version: + versions[f"{package}_version"] = chart_app_version + else: + versions[f"{package}_version"] = packages_data[metadata["version_prefix"]] + + if "libraries_prefix" in metadata: + 
versions[f"{package}_libraries"] = { + k.replace(metadata["libraries_prefix"], ""): version + for k, version in packages_data.items() + if metadata["libraries_prefix"] in k + } + if "extensions_prefix" in metadata: + versions[f"{package}_extensions"] = { + k.replace(metadata["extensions_prefix"], ""): version + for k, version in packages_data.items() + if metadata["extensions_prefix"] in k + } + + if "providers_prefix" in metadata: + versions[f"{package}_providers"] = { + k.replace(metadata["providers_prefix"], ""): version + for k, version in packages_data.items() + if metadata["providers_prefix"] in k + } + + return versions + + +def update_config_release(cluster_domain, release): + def replace_release(path): + replace_in_file(path, r"^release:.*", f'release: "{release}"') + + config_path = Path("config") / cluster_domain + replace_release(config_path / "cluster-params.yaml") + for env in config_path.glob("environments/*/environment.yaml"): + replace_release(env) diff --git a/scripts/volumes.py b/scripts/volumes.py new file mode 100644 index 00000000..7a508474 --- /dev/null +++ b/scripts/volumes.py @@ -0,0 +1,55 @@ +import json +from typing import NamedTuple + +import questionary + +from scripts.k8s_utils import kubectl, kubectl_output + + +class PV(NamedTuple): + name: str + phase: str + volume_id: str + volume_region: str + pvc_namespace: str + pvc_name: str + + @classmethod + def fromk8s(cls, v): + name = v["metadata"]["name"] + phase = v.get("status", {}).get("phase") + spec = v["spec"] + volume_id = spec.get("awsElasticBlockStore", {}).get("volumeID", "") + volume_region = ( + volume_id[len("aws://") :].split("/")[0] + if volume_id.startswith("aws://") + else "" + ) + claim = spec.get("claimRef", {}) + pvc_namespace = claim.get("namespace") + pvc_name = claim.get("name") + return PV( + name=name, + phase=phase, + volume_id=volume_id, + volume_region=volume_region, + pvc_name=pvc_name, + pvc_namespace=pvc_namespace, + ) + + +def get_pvs_raw(): + return json.loads(kubectl_output("get pv -o json"))["items"] + + +def get_pvs(): + return [PV.fromk8s(v) for v in get_pvs_raw()] + + +def delete_released_pvs(): + pvs = get_pvs() + for pv in pvs: + if pv.phase == "Released": + print(pv) + if questionary.confirm("Delete PV?").ask(): + kubectl("delete pv", pv.name) diff --git a/secrets/mapping.cfg b/secrets/mapping.cfg new file mode 100644 index 00000000..c9ccdf10 --- /dev/null +++ b/secrets/mapping.cfg @@ -0,0 +1,8 @@ +config/datacoveslocal.com/secrets/core-api.env +config/datacoveslocal.com/secrets/core-dbt-api.env +config/datacoveslocal.com/secrets/docker-config.secret.json +config/datacoveslocal.com/cluster-params.secret.yaml +config/datacoveslocal.com/environments/dev123/environment.secret.yaml +secrets/cli.secret.yaml +secrets/integration_tests.yaml +infra/base/cloudflare-api-token.env diff --git a/src/.gitignore b/src/.gitignore new file mode 100644 index 00000000..53c50b0e --- /dev/null +++ b/src/.gitignore @@ -0,0 +1,28 @@ +**/base.txt +!common/requirements/base.txt + +**/dbt-snowflake.txt +!common/requirements/dbt-snowflake.txt + +**/dbt-redsfhit.txt +!common/requirements/dbt-redshift.txt + +**/dbt-bigquery.txt +!common/requirements/dbt-bigquery.txt + +**/dbt-databricks.txt +!common/requirements/dbt-databricks.txt + +**/set_adapters_app.sh +!/common/set_adapters_app.sh + +**/providers.txt +!/common/providers/providers.txt + +ci/multiarch/requirements.txt + +ci/airflow/plugins +airflow/airflow/plugins + +ci/airflow/providers +airflow/airflow/providers diff --git a/src/__init__.py 
b/src/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/airflow/__init__.py b/src/airflow/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/airflow/airflow/Dockerfile b/src/airflow/airflow/Dockerfile new file mode 100644 index 00000000..18a5778f --- /dev/null +++ b/src/airflow/airflow/Dockerfile @@ -0,0 +1,167 @@ +FROM apache/airflow:2.10.3-python3.10 AS base +LABEL com.datacoves.library.airflow.psycopg2-binary=2.9.9 +LABEL com.datacoves.library.airflow.snowflake-connector-python[pandas]=3.14.0 +LABEL com.datacoves.library.airflow.snowflake-snowpark-python=1.25.0 +LABEL com.datacoves.library.airflow.acryl-datahub=0.15.0.5 +LABEL com.datacoves.library.airflow.dbt-core=1.9.0 +LABEL com.datacoves.library.airflow.protobuf=5.29.3 +LABEL com.datacoves.library.airflow.dbt-databricks=1.9.0 +LABEL com.datacoves.library.airflow.dbt-snowflake=1.9.0 +LABEL com.datacoves.library.airflow.dbt-redshift=1.9.0 +LABEL com.datacoves.library.airflow.dbt-bigquery=1.9.0 +LABEL com.datacoves.library.airflow.dbt-postgres=1.6.3 +LABEL com.datacoves.library.airflow.dbt-coves=1.9.5 +LABEL com.datacoves.library.airflow.git+https://gitlab.com/datacoves/permifrost.git=v0.15.6 +LABEL com.datacoves.library.airflow.pre-commit=3.7.1 +LABEL com.datacoves.library.airflow.PyYAML=6.0.2 +LABEL com.datacoves.library.airflow.shandy-sqlfmt[jinjafmt]=0.26.0 +LABEL com.datacoves.library.airflow.sqlfluff=3.1.1 +LABEL com.datacoves.library.airflow.sqlfluff-templater-dbt=3.1.1 +LABEL com.datacoves.library.airflow.rich=14.0.0 +LABEL com.datacoves.library.airflow.kubernetes=31.0.0 +LABEL com.datacoves.library.airflow.uv=0.4.30 +LABEL com.datacoves.library.airflow.ruff=0.8.3 +LABEL com.datacoves.library.airflow.snowflake-cli=3.7.1 +LABEL com.datacoves.library.airflow.certifi=2025.1.31 +LABEL com.datacoves.library.airflow.uv=0.4.30 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-snowflake=5.8.0 +LABEL com.datacoves.provider.airflow.acryl-datahub-airflow-plugin[plugin-v2]=0.14.1.9 +LABEL com.datacoves.provider.airflow.airflow-provider-fivetran-async=2.0.2 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-airbyte=3.6.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-microsoft-azure=11.0.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-amazon[s3fs]=9.0.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-celery=3.8.3 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-cncf-kubernetes=10.3.1 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-common-io=1.4.2 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-common-sql=1.19.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-databricks=6.12.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-docker=3.14.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-elasticsearch=5.5.2 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-ftp=3.11.1 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-google=10.25.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-grpc=3.7.3 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-hashicorp=3.8.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-http=4.13.2 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-imap=3.7.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-mysql=5.7.3 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-odbc=4.8.0 +LABEL 
com.datacoves.provider.airflow.apache-airflow-providers-openlineage=1.13.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-oracle=3.12.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-postgres=5.13.1 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-redis=3.8.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-sendgrid=3.6.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-sftp=4.11.1 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-slack=8.9.1 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-smtp=1.8.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-sqlite=3.9.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-ssh=3.14.0 +LABEL com.datacoves.provider.airflow.apache-airflow-providers-tableau=4.6.1 +LABEL com.datacoves.provider.airflow.packaging=23.2 +LABEL com.datacoves.provider.airflow.python-jose=3.3.0 +LABEL com.datacoves.provider.airflow.pyyaml=6.0 +LABEL com.datacoves.provider.airflow.pytest=8.3.4 + +USER root + +# Note: Local airflow requires the UID to be 1000, otherwise it goes to +# war with code-server over file permissions. +# +# Running the usermod command takes forever because it chown's all of +# /home/airflow to UID 1000 +# +# In order to save Docker build time, it makes more sense to convert +# the UID's for all our airflows so we're not doing this command +# multiple times for each local ariflow. + +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + git \ + vim \ + g++ \ + procps \ + && apt-get autoremove -yqq --purge \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && usermod -u 1000 airflow \ + && chown -R airflow /opt/airflow + +RUN echo 'airflow:Datacoves!' | chpasswd +RUN echo "airflow ALL=(ALL) ALL" >> /etc/sudoers + +RUN mkdir -p /opt/datacoves/virtualenvs/main && \ + mkdir -p /opt/datacoves/dags + +RUN chown -R airflow:50000 /opt/datacoves +COPY providers ./providers +RUN chown -R airflow:50000 ./providers + +USER 1000 + +# this will be installed in ~/.local because PIP_USER=true +RUN pip install -U pip && \ + pip install uv && \ + uv pip install --no-cache-dir -r ./providers/providers.txt + +# Uninstalling openlineage-airflow package installed by acryl-datahub-airflow-plugin[plugin-v2] +# since it is not needed on airflow 2.7+ +RUN uv pip uninstall openlineage-airflow + +# install datacoves-airflow-provider +RUN uv pip install ./providers/datacoves + +# copy custom plugins used by Datacoves +COPY plugins/ /opt/airflow/plugins/ + +# copy script that sends SIGINT to dbt +# https://medium.com/@brianepohl/terminating-dbt-in-dagster-kubernetes-job-c53c3bc26012 +COPY pre_stop_hook.sh /opt/datacoves/ + +# copy script that handles post start hook +COPY post_start_hook.sh /opt/datacoves/ + +# copy script that changes default app name on dbt adapters +COPY set_adapters_app.sh /opt/datacoves/ + +# Datacoves secrets backend +COPY secrets/datacoves.py /home/airflow/.local/lib/python3.10/site-packages/airflow/secrets/ + +# Datacoves authentication backends +COPY auth/custom_api_auth.py /home/airflow/.local/lib/python3.10/site-packages/airflow/auth/ + +# Loki Logs => https://github.com/snjypl/airflow-provider-grafana-loki/tree/main +COPY logs/ /opt/airflow/config + +# Hotfixes +COPY hotfix/file_task_handler.py /home/airflow/.local/lib/python3.10/site-packages/airflow/utils/log/ + +# pip install will work only if `--user` is provided, or after activating a virtualenv +ENV PIP_USER=false +RUN python -m venv 
/opt/datacoves/virtualenvs/main + +FROM base AS dbt-snowflake + +COPY profiles/dbt-snowflake ./ +RUN VIRTUAL_ENV=/opt/datacoves/virtualenvs/main \ + uv pip install -r dbt-snowflake.txt \ + && /opt/datacoves/set_adapters_app.sh all + +FROM base AS dbt-redshift + +COPY profiles/dbt-redshift ./ +RUN VIRTUAL_ENV=/opt/datacoves/virtualenvs/main \ + uv pip install -r dbt-redshift.txt \ + && /opt/datacoves/set_adapters_app.sh postgres + +FROM base AS dbt-bigquery + +COPY profiles/dbt-bigquery ./ +RUN VIRTUAL_ENV=/opt/datacoves/virtualenvs/main \ + uv pip install -r dbt-bigquery.txt \ + && /opt/datacoves/set_adapters_app.sh bigquery /opt/datacoves/virtualenvs/main/lib --skip-validation + +FROM base AS dbt-databricks + +COPY profiles/dbt-databricks ./ +RUN VIRTUAL_ENV=/opt/datacoves/virtualenvs/main \ + uv pip install -r dbt-databricks.txt \ + && /opt/datacoves/set_adapters_app.sh databricks /opt/datacoves/virtualenvs/main/lib --skip-validation diff --git a/src/airflow/airflow/__init__.py b/src/airflow/airflow/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/airflow/airflow/auth/custom_api_auth.py b/src/airflow/airflow/auth/custom_api_auth.py new file mode 100644 index 00000000..bbe2152e --- /dev/null +++ b/src/airflow/airflow/auth/custom_api_auth.py @@ -0,0 +1,223 @@ +""" +Examples: + +https://github.com/apache/airflow/pull/10267/files#diff-ce647368acfc9678b618a99ab16d0cedfcef42eb218357b002a00b5c514cfab2 +https://gist.github.com/chrismclennon/c65eed60679a44412f0601f4a16cfaaf +""" +import logging +import os +from functools import wraps +from typing import Callable, Optional, Tuple, TypeVar, Union, cast + +import requests +from flask import Response, current_app, request +from flask_login import login_user +from jose import jwt +from requests.auth import AuthBase + +log = logging.getLogger(__name__) + +CLIENT_AUTH: Optional[Union[Tuple[str, str], AuthBase]] = None +T = TypeVar("T", bound=Callable) + + +def init_app(_): + """Initializes authentication backend""" + + +def _forbidden(): + return Response("Forbidden", 403) + + +def _lookup_user(email: str, name: str, role_name: str): + security_manager = current_app.appbuilder.sm + username = email.split("@")[0] + user = security_manager.find_user(email=email) or security_manager.find_user( + username=username + ) + + role = security_manager.find_role(role_name) + if role is None: + log.error("Role %s does not exists", role_name) + return None + + if user is None: + log.info("Token valid, creating api user: %s role: %s", username, role_name) + user = security_manager.add_user( + username=username, + first_name=name, + last_name="", + email=email, + role=role, + password="test", + ) + + if not user: + return None + + if not user.is_active: + return None + + user.role = role + security_manager.update_user(user) + + return user + + +def _get_role(permissions: list): + account_slug = os.getenv("DATACOVES__ACCOUNT_SLUG") + project_slug = os.getenv("DATACOVES__PROJECT_SLUG") + env_slug = os.getenv("DATACOVES__ENVIRONMENT_SLUG") + + # Valid if the enviroment variables exists + if not all([account_slug, project_slug, env_slug]): + missing_vars = [] + if not account_slug: + missing_vars.append("DATACOVES__ACCOUNT_SLUG") + if not project_slug: + missing_vars.append("DATACOVES__PROJECT_SLUG") + if not env_slug: + missing_vars.append("DATACOVES__ENVIRONMENT_SLUG") + + log.error(f"{', '.join(missing_vars)} env vars are missing.") + return None + + # Permissions + permission_roles = { + # The same for Op role: We need Admin role to manage 
roles with the service account + "Admin": [ + f"{account_slug}:{project_slug}:{env_slug}|workbench:airflow:security|write", + f"{account_slug}:{project_slug}|workbench:airflow:security|write", + f"{account_slug}:{project_slug}:{env_slug}|workbench:airflow:admin|write", + f"{account_slug}:{project_slug}|workbench:airflow:admin|write", + ], + "SysAdmin": [ + f"{account_slug}:{project_slug}:{env_slug}|workbench:airflow:sysadmin|write", + f"{account_slug}:{project_slug}|workbench:airflow:sysadmin|write", + ], + "User": [ + f"{account_slug}:{project_slug}:{env_slug}|workbench:airflow:dags|write", + f"{account_slug}:{project_slug}|workbench:airflow:dags|write", + ], + "Viewer": [ + f"{account_slug}:{project_slug}:{env_slug}|workbench:airflow:dags|read", + f"{account_slug}:{project_slug}:{env_slug}|workbench:airflow|read", + f"{account_slug}:{project_slug}|workbench:airflow:dags|read", + f"{account_slug}:{project_slug}|workbench:airflow|read", + ], + } + + # Checks environment permissions + for role, role_permissions in permission_roles.items(): + if any(perm in permissions for perm in role_permissions): + return role + + return None + + +def _check_jwt_token(token: str): + """This shouldn't have 'Bearer' in the token.""" + + base_url_core_api = os.getenv("DATACOVES__BASE_URL_CORE_API") + if base_url_core_api is None: + log.error("DATACOVES__BASE_URL_CORE_API env var is missing.") + return None + + # Validate token + payload = {"token": token} + endpoint = f"{base_url_core_api}/api/token/verify/" + headers = {"Content-Type": "application/json"} + r = requests.post(url=endpoint, headers=headers, json=payload) + if r.ok: + jwt_decode = jwt.decode(token, None, options={"verify_signature": False}) + permissions = jwt_decode.get("permissions", []) + email = jwt_decode["email"] + name = jwt_decode["name"] + role = _get_role(permissions=permissions) + if role is not None: + return _lookup_user(email=email, name=name, role_name=role) + else: + log.error("User %s does not have valid permissions", email) + + else: + log.info( + "Unable to verify JWToken: url=%s status_code=%s response=%s", + endpoint, + r.status_code, + r.text, + ) + + return None + + +def _check_datacoves_token(token: str): + """This SHOULD have 'Token' in the token""" + + base_url_core_api = os.getenv("DATACOVES__BASE_URL_CORE_API") + if base_url_core_api is None: + log.error("DATACOVES__BASE_URL_CORE_API env var is missing.") + return None + + endpoint = f"{base_url_core_api}/api/datacoves/verify/" + headers = { + "Content-Type": "application/json", + "Authorization": token, + } + + # Validate token + r = requests.get(url=endpoint, headers=headers) + + if r.ok: + items = r.json() + permissions = items.get("permissions", []) + email = items.get("email", "") + name = items.get("name", "") + role = _get_role(permissions=permissions) + + if role is not None: + return _lookup_user(email=email, name=name, role_name=role) + + else: + log.error("User %s does not have valid permissions", email) + + else: + log.info( + "Unable to verify Datacoves Token: url=%s status_code=%s response=%s", + endpoint, + r.status_code, + r.text, + ) + + return None + + +def requires_authentication(function: T): + """Decorator for functions that require authentication""" + + @wraps(function) + def decorated(*args, **kwargs): + authorization = request.headers.get("Authorization") + if not authorization: + return _forbidden() + + # Bearer is JWT, Token is .... uh, token. 
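+        # Illustrative request shapes (header values below are hypothetical, not real tokens):
+        #   Authorization: Bearer eyJhbGciOi...   -> JWT, verified against <core API>/api/token/verify/
+        #   Authorization: Token abc123           -> opaque token, proxied as-is to <core API>/api/datacoves/verify/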
+ if "Bearer" in authorization: + token = authorization.replace("Bearer ", "") + user = _check_jwt_token(token=token) + + elif "Token" in authorization: + # We're just going to proxy the token to core API, so there is + # no need to trim 'Token' off. + user = _check_datacoves_token(token=authorization) + + else: + return _forbidden() + + if user is None: + return _forbidden() + + login_user(user, remember=False) + response = function(*args, **kwargs) + return response + + return cast(T, decorated) diff --git a/src/airflow/airflow/hotfix/file_task_handler.py b/src/airflow/airflow/hotfix/file_task_handler.py new file mode 100644 index 00000000..b1986808 --- /dev/null +++ b/src/airflow/airflow/hotfix/file_task_handler.py @@ -0,0 +1,656 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""File logging handler for tasks.""" +from __future__ import annotations + +import inspect +import logging +import os +import warnings +from contextlib import suppress +from enum import Enum +from functools import cached_property +from pathlib import Path +from typing import TYPE_CHECKING, Any, Callable, Iterable +from urllib.parse import urljoin + +import pendulum + +from airflow.configuration import conf +from airflow.exceptions import AirflowException, RemovedInAirflow3Warning +from airflow.executors.executor_loader import ExecutorLoader +from airflow.utils.context import Context +from airflow.utils.helpers import parse_template_string, render_template_to_string +from airflow.utils.log.logging_mixin import SetContextPropagate +from airflow.utils.log.non_caching_file_handler import NonCachingFileHandler +from airflow.utils.session import create_session +from airflow.utils.state import State, TaskInstanceState + +if TYPE_CHECKING: + from airflow.models.taskinstance import TaskInstance, TaskInstanceKey + +logger = logging.getLogger(__name__) + + +class LogType(str, Enum): + """ + Type of service from which we retrieve logs. + + :meta private: + """ + + TRIGGER = "trigger" + WORKER = "worker" + + +def _set_task_deferred_context_var(): + """ + Tell task log handler that task exited with deferral. + + This exists for the sole purpose of telling elasticsearch handler not to + emit end_of_log mark after task deferral. + + Depending on how the task is run, we may need to set this in task command or in local task job. + Kubernetes executor requires the local task job invocation; local executor requires the task + command invocation. + + :meta private: + """ + logger = logging.getLogger() + with suppress(StopIteration): + h = next(h for h in logger.handlers if hasattr(h, "ctx_task_deferred")) + h.ctx_task_deferred = True + + +def _fetch_logs_from_service(url, log_relative_path): + # Import occurs in function scope for perf. 
Ref: https://github.com/apache/airflow/pull/21438 + import httpx + + from airflow.utils.jwt_signer import JWTSigner + + timeout = conf.getint("webserver", "log_fetch_timeout_sec", fallback=None) + signer = JWTSigner( + secret_key=conf.get("webserver", "secret_key"), + expiration_time_in_seconds=conf.getint( + "webserver", "log_request_clock_grace", fallback=30 + ), + audience="task-instance-logs", + ) + response = httpx.get( + url, + timeout=timeout, + headers={ + "Authorization": signer.generate_signed_token( + {"filename": log_relative_path} + ) + }, + ) + response.encoding = "utf-8" + return response + + +_parse_timestamp = conf.getimport( + "logging", "interleave_timestamp_parser", fallback=None +) + +if not _parse_timestamp: + + def _parse_timestamp(line: str): + timestamp_str, _ = line.split(" ", 1) + return pendulum.parse(timestamp_str.strip("[]")) + + +def _parse_timestamps_in_log_file(lines: Iterable[str]): + timestamp = None + next_timestamp = None + for idx, line in enumerate(lines): + if line: + with suppress(Exception): + # next_timestamp unchanged if line can't be parsed + next_timestamp = _parse_timestamp(line) + if next_timestamp: + timestamp = next_timestamp + yield timestamp, idx, line + + +def _interleave_logs(*logs): + records = [] + for log in logs: + records.extend(_parse_timestamps_in_log_file(log.splitlines())) + last = None + for _, _, v in sorted( + records, + key=lambda x: (x[0], x[1]) if x[0] else (pendulum.datetime(2000, 1, 1), x[1]), + ): + if v != last: # dedupe + yield v + last = v + + +def _ensure_ti(ti: TaskInstanceKey | TaskInstance, session) -> TaskInstance: + """Given TI | TIKey, return a TI object. + + Will raise exception if no TI is found in the database. + """ + from airflow.models.taskinstance import TaskInstance, TaskInstanceKey + + if not isinstance(ti, TaskInstanceKey): + return ti + val = ( + session.query(TaskInstance) + .filter( + TaskInstance.task_id == ti.task_id, + TaskInstance.dag_id == ti.dag_id, + TaskInstance.run_id == ti.run_id, + TaskInstance.map_index == ti.map_index, + ) + .one_or_none() + ) + if isinstance(val, TaskInstance): + val._try_number = ti.try_number + return val + else: + raise AirflowException(f"Could not find TaskInstance for {ti}") + + +def _change_directory_permissions_up(directory: Path, folder_permissions: int): + """ + Change permissions of the given directory and its parents. + + Only attempt to change permissions for directories owned by the current user. + + :param directory: directory to change permissions of (including parents) + :param folder_permissions: permissions to set + """ + if directory.stat().st_uid == os.getuid(): + if directory.stat().st_mode % 0o1000 != folder_permissions % 0o1000: + print(f"Changing {directory} permission to {folder_permissions}") + try: + directory.chmod(folder_permissions) + except PermissionError as e: + # In some circumstances (depends on user and filesystem) we might not be able to + # change the permission for the folder (when the folder was created by another user + # before or when the filesystem does not allow to change permission). We should not + # fail in this case but rather ignore it. + print( + f"Failed to change {directory} permission to {folder_permissions}: {e}" + ) + return + if directory.parent != directory: + _change_directory_permissions_up(directory.parent, folder_permissions) + + +class FileTaskHandler(logging.Handler): + """ + FileTaskHandler is a python log handler that handles and reads task instance logs. 
+ + It creates and delegates log handling to `logging.FileHandler` after receiving task + instance context. It reads logs from task instance's host machine. + + :param base_log_folder: Base log folder to place logs. + :param filename_template: template filename string + """ + + trigger_should_wrap = True + inherits_from_empty_operator_log_message = ( + "Operator inherits from empty operator and thus does not have logs" + ) + + def __init__(self, base_log_folder: str, filename_template: str | None = None): + super().__init__() + self.handler: logging.Handler | None = None + self.local_base = base_log_folder + if filename_template is not None: + warnings.warn( + "Passing filename_template to a log handler is deprecated and has no effect", + RemovedInAirflow3Warning, + # We want to reference the stack that actually instantiates the + # handler, not the one that calls super()__init__. + stacklevel=(2 if type(self) == FileTaskHandler else 3), # noqa + ) + self.maintain_propagate: bool = False + """ + If true, overrides default behavior of setting propagate=False + + :meta private: + """ + + self.ctx_task_deferred = False + """ + If true, task exited with deferral to trigger. + + Some handlers emit "end of log" markers, and may not wish to do so when task defers. + """ + + def set_context( + self, ti: TaskInstance, *, identifier: str | None = None + ) -> None | SetContextPropagate: + """ + Provide task_instance context to airflow task handler. + + Generally speaking returns None. But if attr `maintain_propagate` has + been set to propagate, then returns sentinel MAINTAIN_PROPAGATE. This + has the effect of overriding the default behavior to set `propagate` + to False whenever set_context is called. At time of writing, this + functionality is only used in unit testing. + + :param ti: task instance object + :param identifier: if set, adds suffix to log file. For use when relaying exceptional messages + to task logs from a context other than task or trigger run + """ + local_loc = self._init_file(ti, identifier=identifier) + self.handler = NonCachingFileHandler(local_loc, encoding="utf-8") + if self.formatter: + self.handler.setFormatter(self.formatter) + self.handler.setLevel(self.level) + return ( + SetContextPropagate.MAINTAIN_PROPAGATE if self.maintain_propagate else None + ) + + @cached_property + def supports_task_context_logging(self) -> bool: + return "identifier" in inspect.signature(self.set_context).parameters + + @staticmethod + def add_triggerer_suffix(full_path, job_id=None): + """ + Derive trigger log filename from task log filename. + + E.g. given /path/to/file.log returns /path/to/file.log.trigger.123.log, where 123 + is the triggerer id. We use the triggerer ID instead of trigger ID to distinguish + the files because, rarely, the same trigger could get picked up by two different + triggerer instances. 
+ """ + full_path = Path(full_path).as_posix() + full_path += f".{LogType.TRIGGER.value}" + if job_id: + full_path += f".{job_id}.log" + return full_path + + def emit(self, record): + if self.handler: + self.handler.emit(record) + + def flush(self): + if self.handler: + self.handler.flush() + + def close(self): + if self.handler: + self.handler.close() + + def _render_filename( + self, ti: TaskInstance | TaskInstanceKey, try_number: int + ) -> str: + """Return the worker log filename.""" + with create_session() as session: + ti = _ensure_ti(ti, session) + dag_run = ti.get_dagrun(session=session) + template = dag_run.get_log_template(session=session).filename + str_tpl, jinja_tpl = parse_template_string(template) + + if jinja_tpl: + if hasattr(ti, "task"): + context = ti.get_template_context(session=session) + else: + context = Context(ti=ti, ts=dag_run.logical_date.isoformat()) + context["try_number"] = try_number + return render_template_to_string(jinja_tpl, context) + + if str_tpl: + try: + dag = ti.task.dag + except AttributeError: # ti.task is not always set. + data_interval = (dag_run.data_interval_start, dag_run.data_interval_end) + else: + if TYPE_CHECKING: + assert dag is not None + data_interval = dag.get_run_data_interval(dag_run) + if data_interval[0]: + data_interval_start = data_interval[0].isoformat() + else: + data_interval_start = "" + if data_interval[1]: + data_interval_end = data_interval[1].isoformat() + else: + data_interval_end = "" + return str_tpl.format( + dag_id=ti.dag_id, + task_id=ti.task_id, + run_id=ti.run_id, + data_interval_start=data_interval_start, + data_interval_end=data_interval_end, + execution_date=ti.get_dagrun().logical_date.isoformat(), + try_number=try_number, + ) + else: + raise RuntimeError( + f"Unable to render log filename for {ti}. This should never happen" + ) + + def _read_grouped_logs(self): + return False + + @cached_property + def _executor_get_task_log( + self, + ) -> Callable[[TaskInstance, int], tuple[list[str], list[str]]]: + """This cached property avoids loading executor repeatedly.""" + executor = ExecutorLoader.get_default_executor() + return executor.get_task_log + + def _read( + self, + ti: TaskInstance, + try_number: int, + metadata: dict[str, Any] | None = None, + ): + """ + Template method that contains custom logic of reading logs given the try_number. + + :param ti: task instance record + :param try_number: current try_number to read log from + :param metadata: log metadata, + can be used for steaming log reading and auto-tailing. + Following attributes are used: + log_pos: (absolute) Char position to which the log + which was retrieved in previous calls, this + part will be skipped and only following test + returned to be added to tail. + :return: log message as a string and metadata. + Following attributes are used in metadata: + end_of_log: Boolean, True if end of log is reached or False + if further calls might get more log text. + This is determined by the status of the TaskInstance + log_pos: (absolute) Char position to which the log is retrieved + """ + # Task instance here might be different from task instance when + # initializing the handler. Thus explicitly getting log location + # is needed to get correct log path. 
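+        # Sketch of the precedence implemented below (descriptive only): remote logs
+        # when a subclass implements _read_remote_logs, executor-provided logs while
+        # the task is running, local files under base_log_folder, and finally the
+        # worker/triggerer log server; _interleave_logs then orders and de-duplicates
+        # the combined lines by parsed timestamp.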
+ worker_log_rel_path = self._render_filename(ti, try_number) + messages_list: list[str] = [] + remote_logs: list[str] = [] + local_logs: list[str] = [] + executor_messages: list[str] = [] + executor_logs: list[str] = [] + served_logs: list[str] = [] + is_running = ti.try_number == try_number and ti.state in ( + TaskInstanceState.RUNNING, + TaskInstanceState.DEFERRED, + ) + with suppress(NotImplementedError): + remote_messages, remote_logs = self._read_remote_logs( + ti, try_number, metadata + ) + messages_list.extend(remote_messages) + if ti.state == TaskInstanceState.RUNNING: + response = self._executor_get_task_log(ti, try_number) + if response: + executor_messages, executor_logs = response + if executor_messages: + messages_list.extend(executor_messages) + if not (remote_logs and ti.state not in State.unfinished): + # when finished, if we have remote logs, no need to check local + worker_log_full_path = Path(self.local_base, worker_log_rel_path) + local_messages, local_logs = self._read_from_local(worker_log_full_path) + messages_list.extend(local_messages) + if is_running and not executor_messages: + served_messages, served_logs = self._read_from_logs_server( + ti, worker_log_rel_path + ) + messages_list.extend(served_messages) + elif ti.state not in State.unfinished and not (local_logs or remote_logs): + # ordinarily we don't check served logs, with the assumption that users set up + # remote logging or shared drive for logs for persistence, but that's not always true + # so even if task is done, if no local logs or remote logs are found, we'll check the worker + served_messages, served_logs = self._read_from_logs_server( + ti, worker_log_rel_path + ) + messages_list.extend(served_messages) + + logs = "\n".join( + _interleave_logs( + *local_logs, + *remote_logs, + *(executor_logs or []), + *served_logs, + ) + ) + log_pos = len(logs) + messages = "".join([f"*** {x}\n" for x in messages_list]) + if metadata and "log_pos" in metadata: + previous_chars = metadata["log_pos"] + logs = logs[ + previous_chars: + ] # Cut off previously passed log test as new tail + out_message = logs if "log_pos" in (metadata or {}) else messages + logs + return out_message, {"end_of_log": not is_running, "log_pos": log_pos} + + @staticmethod + def _get_pod_namespace(ti: TaskInstance): + pod_override = ti.executor_config.get("pod_override") + namespace = None + with suppress(Exception): + namespace = pod_override.metadata.namespace + return namespace or conf.get("kubernetes_executor", "namespace") + + def _get_log_retrieval_url( + self, ti: TaskInstance, log_relative_path: str, log_type: LogType | None = None + ) -> tuple[str, str]: + """Given TI, generate URL with which to fetch logs from service log server.""" + if log_type == LogType.TRIGGER: + if not ti.triggerer_job: + raise RuntimeError( + "Could not build triggerer log URL; no triggerer job." + ) + config_key = "triggerer_log_server_port" + config_default = 8794 + hostname = ti.triggerer_job.hostname + log_relative_path = self.add_triggerer_suffix( + log_relative_path, job_id=ti.triggerer_job.id + ) + else: + hostname = ti.hostname + config_key = "worker_log_server_port" + config_default = 8793 + return ( + urljoin( + f"http://{hostname}:{conf.get('logging', config_key, fallback=config_default)}/log/", + log_relative_path, + ), + log_relative_path, + ) + + def read(self, task_instance, try_number=None, metadata=None): + """ + Read logs of given task instance from local machine. 
+ + :param task_instance: task instance object + :param try_number: task instance try_number to read logs from. If None + it returns all logs separated by try_number + :param metadata: log metadata, can be used for steaming log reading and auto-tailing. + :return: a list of listed tuples which order log string by host + """ + # Task instance increments its try number when it starts to run. + # So the log for a particular task try will only show up when + # try number gets incremented in DB, i.e logs produced the time + # after cli run and before try_number + 1 in DB will not be displayed. + if try_number is None: + next_try = task_instance.next_try_number + try_numbers = list(range(1, next_try)) + elif try_number < 1: + logs = [ + [ + ( + "default_host", + f"Error fetching the logs. Try number {try_number} is invalid.", + ) + ], + ] + return logs, [{"end_of_log": True}] + else: + try_numbers = [try_number] + + logs = [""] * len(try_numbers) + metadata_array = [{}] * len(try_numbers) + + # subclasses implement _read and may not have log_type, which was added recently + for i, try_number_element in enumerate(try_numbers): + log, out_metadata = self._read(task_instance, try_number_element, metadata) + # es_task_handler return logs grouped by host. wrap other handler returning log string + # with default/ empty host so that UI can render the response in the same way + logs[i] = ( + log if self._read_grouped_logs() else [(task_instance.hostname, log)] + ) + metadata_array[i] = out_metadata + + return logs, metadata_array + + def _prepare_log_folder(self, directory: Path): + """ + Prepare the log folder and ensure its mode is as configured. + + To handle log writing when tasks are impersonated, the log files need to + be writable by the user that runs the Airflow command and the user + that is impersonated. This is mainly to handle corner cases with the + SubDagOperator. When the SubDagOperator is run, all of the operators + run under the impersonated user and create appropriate log files + as the impersonated user. However, if the user manually runs tasks + of the SubDagOperator through the UI, then the log files are created + by the user that runs the Airflow command. For example, the Airflow + run command may be run by the `airflow_sudoable` user, but the Airflow + tasks may be run by the `airflow` user. If the log files are not + writable by both users, then it's possible that re-running a task + via the UI (or vice versa) results in a permission error as the task + tries to write to a log file created by the other user. + + We leave it up to the user to manage their permissions by exposing configuration for both + new folders and new log files. Default is to make new log folders and files group-writeable + to handle most common impersonation use cases. The requirement in this case will be to make + sure that the same group is set as default group for both - impersonated user and main airflow + user. + """ + new_folder_permissions = int( + conf.get( + "logging", "file_task_handler_new_folder_permissions", fallback="0o775" + ), + 8, + ) + directory.mkdir(mode=new_folder_permissions, parents=True, exist_ok=True) + _change_directory_permissions_up(directory, new_folder_permissions) + + def _init_file(self, ti, *, identifier: str | None = None): + """ + Create log directory and give it permissions that are configured. + + See above _prepare_log_folder method for more detailed explanation. 
+ + :param ti: task instance object + :return: relative log path of the given task instance + """ + new_file_permissions = int( + conf.get( + "logging", "file_task_handler_new_file_permissions", fallback="0o664" + ), + 8, + ) + local_relative_path = self._render_filename(ti, ti.try_number) + full_path = os.path.join(self.local_base, local_relative_path) + if identifier: + full_path += f".{identifier}.log" + elif ti.is_trigger_log_context is True: + # if this is true, we're invoked via set_context in the context of + # setting up individual trigger logging. return trigger log path. + full_path = self.add_triggerer_suffix( + full_path=full_path, job_id=ti.triggerer_job.id + ) + self._prepare_log_folder(Path(full_path).parent) + + if not os.path.exists(full_path): + open(full_path, "a").close() + # HOTFIX: Catch PermissionError exception to avoid issues in Azure Blob Storage + try: + os.chmod(full_path, new_file_permissions) + except (OSError, PermissionError) as e: + # Avoid printing warning in task logs + # logging.warning("OSError while changing ownership of the log file. ", e) + logging.debug("OSError while changing ownership of the log file. ", e) + + return full_path + + @staticmethod + def _read_from_local(worker_log_path: Path) -> tuple[list[str], list[str]]: + messages = [] + paths = sorted(worker_log_path.parent.glob(worker_log_path.name + "*")) + if paths: + messages.append("Found local files:") + messages.extend(f" * {x}" for x in paths) + logs = [file.read_text() for file in paths] + return messages, logs + + def _read_from_logs_server( + self, ti, worker_log_rel_path + ) -> tuple[list[str], list[str]]: + messages = [] + logs = [] + try: + log_type = LogType.TRIGGER if ti.triggerer_job else LogType.WORKER + url, rel_path = self._get_log_retrieval_url( + ti, worker_log_rel_path, log_type=log_type + ) + response = _fetch_logs_from_service(url, rel_path) + if response.status_code == 403: + messages.append( + "!!!! Please make sure that all your Airflow components (e.g. " + "schedulers, webservers, workers and triggerer) have " + "the same 'secret_key' configured in 'webserver' section and " + "time is synchronized on all your machines (for example with ntpd)\n" + "See more at https://airflow.apache.org/docs/apache-airflow/" + "stable/configurations-ref.html#secret-key" + ) + # Check if the resource was properly fetched + response.raise_for_status() + if response.text: + messages.append(f"Found logs served from host {url}") + logs.append(response.text) + except Exception as e: + from httpx import UnsupportedProtocol + + if ( + isinstance(e, UnsupportedProtocol) + and ti.task.inherits_from_empty_operator is True + ): + messages.append(self.inherits_from_empty_operator_log_message) + else: + messages.append(f"Could not read served logs: {e}") + logger.exception("Could not read served logs") + return messages, logs + + def _read_remote_logs( + self, ti, try_number, metadata=None + ) -> tuple[list[str], list[str]]: + """ + Implement in subclasses to read from the remote service. + + This method should return two lists, messages and logs. + + * Each element in the messages list should be a single message, + such as, "reading from x file". + * Each element in the logs list should be the content of one file. 
+ """ + raise NotImplementedError diff --git a/src/airflow/airflow/logs/log_config.py b/src/airflow/airflow/logs/log_config.py new file mode 100644 index 00000000..60b4c09b --- /dev/null +++ b/src/airflow/airflow/logs/log_config.py @@ -0,0 +1,35 @@ +import os +from copy import deepcopy + +# import the default logging configuration +from airflow.config_templates.airflow_local_settings import ( + BASE_LOG_FOLDER, + DEFAULT_LOGGING_CONFIG, + FILENAME_TEMPLATE, +) + +LOGGING_CONFIG = deepcopy(DEFAULT_LOGGING_CONFIG) + +secondary_log_task_handler = os.environ.get("DATACOVES__SECONDARY_LOG_TASK_HANDLER") + +if secondary_log_task_handler and secondary_log_task_handler == "loki": + # add an additional handler + LOGGING_CONFIG["handlers"]["secondary_log_task_handler"] = { + # you can import your own custom handler here + "class": "loki.loki_task_handler.LokiTaskHandler", + # you can add a custom formatter here + "formatter": "airflow", + # name + "name": "loki", + # the following env variables were set in the dockerfile + "base_log_folder": os.path.expanduser(BASE_LOG_FOLDER), + "filename_template": FILENAME_TEMPLATE, + # if needed, custom filters can be added here + "filters": ["mask_secrets"], + } + + # this line adds the "secondary_log_task_handler" as a handler to airflow.task + LOGGING_CONFIG["loggers"]["airflow.task"]["handlers"] = [ + "task", + "secondary_log_task_handler", + ] diff --git a/src/airflow/airflow/logs/loki/__init__.py b/src/airflow/airflow/logs/loki/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/airflow/airflow/logs/loki/loki.py b/src/airflow/airflow/logs/loki/loki.py new file mode 100644 index 00000000..e9c45340 --- /dev/null +++ b/src/airflow/airflow/logs/loki/loki.py @@ -0,0 +1,50 @@ +import os +from typing import Dict + +import requests +import urllib3 + +from airflow.providers.http.hooks.http import HttpHook + +urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + + +class LokiHook(HttpHook): + """ + Loki Hook that interacts with an log push and query endpoint. + + :param loki_conn_id: connection that has the base API url i.e https://www.grafana.com/ + and optional authentication credentials. Default headers can also be specified in + the Extra field in json format. 
+ :type loki_conn_id: str + """ + + conn_name_attr = "loki_conn_id" + default_conn_name = "loki_default" + conn_type = "loki" + hook_name = "Grafana Loki" + v1_base_endpoint = "/loki/api/v1/{method}" + + def __init__(self, loki_conn_id: str = default_conn_name, *args, **kwargs) -> None: + super().__init__(http_conn_id=loki_conn_id, *args, **kwargs) + self.loki_conn_id = loki_conn_id + self.tsl_verify = ( + os.getenv("DATACOVES__LOKI_TLS_VERIFY", "true").lower() == "true" + ) + + def query_range(self, params, headers=None) -> Dict: + query_range_endpoint = self.v1_base_endpoint.format(method="query_range") + self.method = "GET" + response = self.run(query_range_endpoint, data=params, headers=headers) + response.raise_for_status() + return response.json() + + def push_log(self, payload, headers=None) -> requests.Response: + push_endpoint = self.v1_base_endpoint.format(method="push") + self.method = "POST" + extra_options = {"timeout": 3, "verify": self.tsl_verify} + response = self.run( + push_endpoint, data=payload, extra_options=extra_options, headers=headers + ) + response.raise_for_status() + return response diff --git a/src/airflow/airflow/logs/loki/loki_task_handler.py b/src/airflow/airflow/logs/loki/loki_task_handler.py new file mode 100644 index 00000000..10f3b03c --- /dev/null +++ b/src/airflow/airflow/logs/loki/loki_task_handler.py @@ -0,0 +1,121 @@ +"""Loki logging handler for tasks""" + +import gzip +import json +import logging +import os +import time +import typing + +if typing.TYPE_CHECKING: + from airflow.models import TaskInstance + +from typing import Dict, List, Optional, Tuple + +from loki.loki import LokiHook + +from airflow.compat.functools import cached_property +from airflow.configuration import conf +from airflow.utils.log.file_task_handler import FileTaskHandler +from airflow.utils.log.logging_mixin import LoggingMixin + +logging.raiseExceptions = True +BasicAuth = Optional[Tuple[str, str]] +DEFAULT_LOGGER_NAME = "airflow" + + +class LokiTaskHandler(FileTaskHandler, LoggingMixin): + def __init__( + self, + base_log_folder, + name, + filename_template: Optional[str] = None, + ): + super().__init__(base_log_folder, filename_template) + self.name: str = name + self.handler: Optional[logging.FileHandler] = None + self.log_relative_path = "" + self.closed = False + self.upload_on_close = True + self.labels: Dict[str, str] = {} + self.env_slug = os.getenv("DATACOVES__ENVIRONMENT_SLUG") + self.project_slug = os.getenv("DATACOVES__PROJECT_SLUG") + self.account_slug = os.getenv("DATACOVES__ACCOUNT_SLUG") + self.namespace = f"dcw-{self.env_slug}" + + @cached_property + def hook(self) -> LokiHook: + """Returns LokiHook""" + remote_conn_id = str(conf.get("logging", "REMOTE_LOG_CONN_ID")) + return LokiHook(loki_conn_id=remote_conn_id) + + def get_labels(self, ti) -> Dict[str, str]: + return { + "job": "airflow-logs", + "agent": f"airflow-loki-{self.env_slug}", + "dag_id": ti.dag_id, + "run_id": getattr(ti, "run_id", ""), + "task_id": ti.task_id, + "try_number": str(ti.try_number), + "namespace": self.namespace, + "environment": self.env_slug, + "project": self.project_slug, + "account": self.account_slug, + } + + def set_context(self, task_instance: "TaskInstance") -> None: + super().set_context(task_instance) + + ti = task_instance + + self.log_relative_path = self._render_filename(ti, ti.try_number) + self.upload_on_close = not ti.raw + + # Clear the file first so that duplicate data is not uploaded + # when re-using the same path (e.g. 
with rescheduled sensors) + if self.upload_on_close: + if self.handler: + with open(self.handler.baseFilename, "w"): + pass + self.labels = self.get_labels(ti) + # self.extras = self.get_extras(ti) + self.extras = {} + + def close(self): + """Close and upload local log file to remote storage Loki.""" + + if self.closed: + return + + super().close() + + if not self.upload_on_close: + return + + local_loc = os.path.join(self.local_base, self.log_relative_path) + if os.path.exists(local_loc): + # read log and remove old logs to get just the latest additions + with open(local_loc) as logfile: + log = logfile.readlines() + self.loki_write(log) + + # Mark closed so we don't double write if close is called twice + self.closed = True + + def build_payload(self, log: List[str], labels) -> dict: + """Build JSON payload with a log entry.""" + lines = [[str(int(time.time_ns())), line] for line in log] + payload = { + "stream": labels, + "values": lines, + } + + return {"streams": [payload]} + + def loki_write(self, log): + payload = self.build_payload(log, self.labels) + + headers = {"Content-Type": "application/json", "Content-Encoding": "gzip"} + + payload = gzip.compress(json.dumps(payload).encode("utf-8")) + self.hook.push_log(payload=payload, headers=headers) diff --git a/src/airflow/airflow/post_start_hook.sh b/src/airflow/airflow/post_start_hook.sh new file mode 100755 index 00000000..653021f7 --- /dev/null +++ b/src/airflow/airflow/post_start_hook.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +# Wait for 8080 to open +while ! nc -z "${DATACOVES__ENVIRONMENT_SLUG}-airflow-webserver" 8080 < /dev/null; do sleep 1; done + +# Send request to core API to push secrets. This may take awhile as well. +while ! curl -f -k ${DATACOVES__SECRETS_PUSH_ENDPOINT}; do sleep 1; done diff --git a/src/airflow/airflow/pre_stop_hook.sh b/src/airflow/airflow/pre_stop_hook.sh new file mode 100755 index 00000000..65610f80 --- /dev/null +++ b/src/airflow/airflow/pre_stop_hook.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +#============================================================================================== +# Datacoves +# +# This script is designed to find dbt processes and kill them with the goal of not leaving +# queries or executions open in Snowflake if the pod is killed. +# +# More info: +# - https://medium.com/@brianepohl/terminating-dbt-in-dagster-kubernetes-job-c53c3bc26012 +#============================================================================================ +pids=`ps -ef | grep -v grep | grep bin/dbt | grep run | awk '{print $2}'` +for pid in $pids; do + if kill -0 "$pid" 2>/dev/null; then + echo "Sending SIGINT to PID $pid" + kill -2 "$pid" + sleep 1 # It is necessary to prevent the killing process from being cancelled. 
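+        # SIGINT (kill -2) mirrors Ctrl-C, so dbt gets a chance to cancel its
+        # in-flight warehouse queries instead of leaving them running after the pod dies.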
+ else + echo "PID $pid does not exist or is not running" + fi +done diff --git a/src/airflow/airflow/profiles/base/python/.gitkeep b/src/airflow/airflow/profiles/base/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/airflow/airflow/profiles/dbt-bigquery/python/.gitkeep b/src/airflow/airflow/profiles/dbt-bigquery/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/airflow/airflow/profiles/dbt-databricks/python/.gitkeep b/src/airflow/airflow/profiles/dbt-databricks/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/airflow/airflow/profiles/dbt-redshift/python/.gitkeep b/src/airflow/airflow/profiles/dbt-redshift/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/airflow/airflow/profiles/dbt-snowflake/extensions/snowflake.snowflake-vsc-0.6.2.vsix b/src/airflow/airflow/profiles/dbt-snowflake/extensions/snowflake.snowflake-vsc-0.6.2.vsix new file mode 100644 index 00000000..fdc908aa Binary files /dev/null and b/src/airflow/airflow/profiles/dbt-snowflake/extensions/snowflake.snowflake-vsc-0.6.2.vsix differ diff --git a/src/airflow/airflow/profiles/dbt-snowflake/python/.gitkeep b/src/airflow/airflow/profiles/dbt-snowflake/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/airflow/airflow/secrets/datacoves.py b/src/airflow/airflow/secrets/datacoves.py new file mode 100644 index 00000000..668efe39 --- /dev/null +++ b/src/airflow/airflow/secrets/datacoves.py @@ -0,0 +1,198 @@ +from __future__ import annotations + +import json +import logging +import warnings +from inspect import signature +from os import environ +from pydoc import locate +from typing import TYPE_CHECKING, Any + +import requests + +from airflow.exceptions import ( + AirflowException, + RemovedInAirflow3Warning, +) +from airflow.secrets.base_secrets import BaseSecretsBackend +from airflow.utils.log.logging_mixin import LoggingMixin + +log = logging.getLogger(__name__) + +if TYPE_CHECKING: + from airflow.models.connection import Connection + + +# These variables may violate the 'only start with datacoves-' rule +ACCEPT_VARIABLES = ("stmp_default",) +ACCEPT_CONNECTIONS = ("datahub_rest_default",) + + +def get_connection_parameter_names() -> set[str]: + """Return :class:`airflow.models.connection.Connection` constructor parameters.""" + from airflow.models.connection import Connection + + return {k for k in signature(Connection.__init__).parameters.keys() if k != "self"} + + +def _create_connection(conn_id: str, value: Any): + """Create a connection based on a URL or JSON object.""" + from airflow.models.connection import Connection + + if isinstance(value, str): + return Connection(conn_id=conn_id, uri=value) + if isinstance(value, dict): + connection_parameter_names = get_connection_parameter_names() | {"extra_dejson"} + current_keys = set(value.keys()) + if not current_keys.issubset(connection_parameter_names): + illegal_keys = current_keys - connection_parameter_names + illegal_keys_list = ", ".join(illegal_keys) + raise AirflowException( + f"The object have illegal keys: {illegal_keys_list}. " + f"The dictionary can only contain the following keys: {connection_parameter_names}" + ) + if "extra" in value and "extra_dejson" in value: + raise AirflowException( + "The extra and extra_dejson parameters are mutually exclusive. " + "Please provide only one parameter." 
+ ) + if "extra_dejson" in value: + value["extra"] = json.dumps(value["extra_dejson"]) + del value["extra_dejson"] + + if "conn_id" in current_keys and conn_id != value["conn_id"]: + raise AirflowException( + f"Mismatch conn_id. " + f"The dictionary key has the value: {value['conn_id']}. " + f"The item has the value: {conn_id}." + ) + value["conn_id"] = conn_id + return Connection(**value) + raise AirflowException( + f"Unexpected value type: {type(value)}. The connection can only be defined using a string or object." + ) + + +class DatacovesBackend(BaseSecretsBackend, LoggingMixin): + """ + Retrieves Connection objects and Variables from Datacoves Secrets API + + :param env_slug: Datacoves Environment slug + :param api_token: API token to consume Datacoves API + """ + + def __init__(self): + super().__init__() + self.base_url = environ.get("DATACOVES__SECRETS_API_ENDPOINT") + self.api_token = None + self.secondary_secrets = None + + def _init(self): + """This can't run during __init__ because it causes an endless loop. + However, we can do this to check before getting secrets/etc. + """ + + from airflow.models.variable import Variable + + # Set up Datacoves secrets manager for Datacoves-based secrets + if self.api_token is None: + # Get our API token from varaibles if we can + self.api_token = Variable.get_variable_from_secrets( + key="datacoves-primary-secret" + ) + + if self.api_token is None: + raise AirflowException( + "Could not establish connection to Datacoves Secrets Backend " + "due to missing secret." + ) + + # Set up secondary secret manager if we have one + if self.secondary_secrets is None: + extra = Variable.get_variable_from_secrets(key="datacoves-secondary-secret") + + if not extra: + self.secondary_secrets = False + + else: + extra = json.loads(extra) + secret_class = locate(extra["backend"]) + self.secondary_secrets = secret_class(**extra["backend_config"]) + + def _get_secret_from_api(self, slug=str) -> str | None: + if not self.base_url: + return None + + headers = {"Authorization": f"token {self.api_token}"} + + response = requests.get( + self.base_url, headers=headers, verify=False, params={"slug": slug} + ) + + response.raise_for_status() + + items = response.json() + + if items and isinstance(items, list): + return items[0]["value"] + + return None + + def get_connection(self, conn_id: str) -> Connection | None: + if not conn_id.startswith("datacoves-") and \ + conn_id not in ACCEPT_CONNECTIONS: + return None + + self._init() + + # Try from our secondary first + if self.secondary_secrets: + conn_value = self.secondary_secrets.get_connection(conn_id) + + if conn_value is not None: + return conn_value + + conn_value = self._get_secret_from_api(conn_id) + + if conn_value: + return _create_connection(conn_id, conn_value) + + return None + + def get_connections(self, conn_id: str) -> list[Any]: + warnings.warn( + "This method is deprecated. Please use " + "`airflow.secrets.local_filesystem.DatacovesBackend.get_connection`.", + RemovedInAirflow3Warning, + stacklevel=2, + ) + + conn = self.get_connection(conn_id) + + if conn: + return [self.get_connection(conn_id)] + + return [] + + def get_variable(self, key: str) -> str | None: + # Don't try to fetch connection info for our own connections. 
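+        # Descriptive note: this backend only serves keys that start with "datacoves-"
+        # (excluding the two bootstrap secrets it reads itself) plus the explicit
+        # ACCEPT_VARIABLES allowlist; returning None for anything else lets Airflow
+        # fall back to its other variable sources.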
+ if key not in ACCEPT_VARIABLES and ( + not key.startswith("datacoves-") + or key + in ( + "datacoves-primary-secret", + "datacoves-secondary-secret", + ) + ): + return None + + self._init() + + # Try from our secondary first + if self.secondary_secrets: + key_value = self.secondary_secrets.get_variable(key) + + if key_value is not None: + return key_value + + return self._get_secret_from_api(key) diff --git a/src/ci/airflow/Dockerfile b/src/ci/airflow/Dockerfile new file mode 100644 index 00000000..cc848a0b --- /dev/null +++ b/src/ci/airflow/Dockerfile @@ -0,0 +1,102 @@ +FROM python:3.10 AS base + +LABEL com.datacoves.from=python:3.10 +LABEL com.datacoves.library.ci-airflow.psycopg2-binary=2.9.9 +LABEL com.datacoves.library.ci-airflow.dbt-redshift=1.9.0 +LABEL com.datacoves.library.ci-airflow.dbt-databricks=1.9.0 +LABEL com.datacoves.library.ci-airflow.dbt-snowflake=1.9.0 +LABEL com.datacoves.library.ci-airflow.snowflake-connector-python[pandas]=3.14.0 +LABEL com.datacoves.library.ci-airflow.snowflake-snowpark-python=1.25.0 +LABEL com.datacoves.library.ci-airflow.dbt-bigquery=1.9.0 +LABEL com.datacoves.library.ci-airflow.acryl-datahub=0.15.0.5 +LABEL com.datacoves.library.ci-airflow.dbt-core=1.9.0 +LABEL com.datacoves.library.ci-airflow.protobuf=5.29.3 +LABEL com.datacoves.library.ci-airflow.dbt-coves=1.9.5 +LABEL com.datacoves.library.ci-airflow.git+https://gitlab.com/datacoves/permifrost.git=v0.15.6 +LABEL com.datacoves.library.ci-airflow.pre-commit=3.7.1 +LABEL com.datacoves.library.ci-airflow.PyYAML=6.0.2 +LABEL com.datacoves.library.ci-airflow.shandy-sqlfmt[jinjafmt]=0.26.0 +LABEL com.datacoves.library.ci-airflow.sqlfluff=3.1.1 +LABEL com.datacoves.library.ci-airflow.sqlfluff-templater-dbt=3.1.1 +LABEL com.datacoves.library.ci-airflow.rich=14.0.0 +LABEL com.datacoves.library.ci-airflow.kubernetes=31.0.0 +LABEL com.datacoves.library.ci-airflow.uv=0.4.30 +LABEL com.datacoves.library.ci-airflow.ruff=0.8.3 +LABEL com.datacoves.library.ci-airflow.snowflake-cli=3.7.1 +LABEL com.datacoves.library.ci-airflow.certifi=2025.1.31 + + +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + git \ + g++ \ + rsync \ + awscli \ + libxml2-dev \ + libxmlsec1-dev \ + libxmlsec1-openssl \ + && apt-get autoremove -yqq --purge \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + +RUN python -m venv /opt/datacoves/virtualenvs/main +ENV AIRFLOW_HOME=/opt/airflow +COPY providers ./providers + +# Variable: AIRFLOW__DATABASE__SQL_ALCHEMY_CONN +COPY airflow.db /opt/airflow/airflow.db +ENV AIRFLOW__DATABASE__SQL_ALCHEMY_CONN=sqlite:////opt/airflow/airflow.db + +RUN pip install -U pip && \ + pip install uv && \ + uv pip install --system --no-cache-dir -r ./providers/providers.txt && \ + uv pip install --system apache-airflow==2.10.3 --constraint https://raw.githubusercontent.com/apache/airflow/constraints-2.10.3/constraints-3.10.txt && \ + uv pip install --system pytest==8.3.3 pytest-env && \ + pip install ./providers/datacoves + +# Uninstalling openlineage-airflow package installed by acryl-datahub-airflow-plugin[plugin-v2] +# since it is not needed on airflow 2.7+ +RUN pip uninstall -y openlineage-airflow + +# copy custom plugins used by Datacoves +COPY plugins/ $AIRFLOW_HOME/plugins/ + +ENV PYTHONIOENCODING=utf-8 +ENV LANG C.UTF-8 +ENV AIRFLOW__CORE__LOAD_EXAMPLES=False + +WORKDIR /usr/app +VOLUME /usr/app + +COPY test_dags.py /usr/app/test_dags.py + +# copy script that changes default app name on dbt adapters +COPY set_adapters_app.sh /opt/datacoves/ + +FROM base AS dbt-snowflake + 
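+# Each adapter stage below follows the same pattern: copy that profile's
+# requirements, install them with uv into the shared virtualenv, and run
+# set_adapters_app.sh to change the adapters' default app name. Presumably the
+# CI build selects one of these stages as its target per profile.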
+COPY profiles/dbt-snowflake ./ +RUN VIRTUAL_ENV=/opt/datacoves/virtualenvs/main \ + uv pip install -r dbt-snowflake.txt \ + && /opt/datacoves/set_adapters_app.sh all + +FROM base AS dbt-redshift + +COPY profiles/dbt-redshift ./ +RUN VIRTUAL_ENV=/opt/datacoves/virtualenvs/main \ + uv pip install -r dbt-redshift.txt \ + && /opt/datacoves/set_adapters_app.sh postgres + +FROM base AS dbt-bigquery + +COPY profiles/dbt-bigquery ./ +RUN VIRTUAL_ENV=/opt/datacoves/virtualenvs/main \ + uv pip install -r dbt-bigquery.txt \ + && /opt/datacoves/set_adapters_app.sh bigquery /opt/datacoves/virtualenvs/main/lib --skip-validation + +FROM base AS dbt-databricks + +COPY profiles/dbt-databricks ./ +RUN VIRTUAL_ENV=/opt/datacoves/virtualenvs/main \ + uv pip install -r dbt-databricks.txt \ + && /opt/datacoves/set_adapters_app.sh databricks /opt/datacoves/virtualenvs/main/lib --skip-validation diff --git a/src/ci/airflow/airflow.db b/src/ci/airflow/airflow.db new file mode 100644 index 00000000..1b5847c3 Binary files /dev/null and b/src/ci/airflow/airflow.db differ diff --git a/src/ci/airflow/mock_server.py b/src/ci/airflow/mock_server.py new file mode 100644 index 00000000..95806e57 --- /dev/null +++ b/src/ci/airflow/mock_server.py @@ -0,0 +1,24 @@ +""" +Launch mock service that responds to (https://pythonbasics.org/webserver/): +/api/connections/list +/api/workspaces/list +""" + +from http.server import BaseHTTPRequestHandler, HTTPServer + +host_name = "localhost" +server_port = 8001 + + +class MockAirbyteServer(BaseHTTPRequestHandler): + def do_GET(self): + self.send_response(200) + self.send_header("Content-type", "application/json") + self.end_headers() + self.wfile.write(bytes(dict())) + + +if __name__ == "__main__": + web_server = HTTPServer((host_name, server_port), MockAirbyteServer) + print(f"Server started at http://{host_name}:{server_port}") + web_server.serve_forever() diff --git a/src/ci/airflow/profiles/base/python/.gitkeep b/src/ci/airflow/profiles/base/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/ci/airflow/profiles/dbt-bigquery/python/.gitkeep b/src/ci/airflow/profiles/dbt-bigquery/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/ci/airflow/profiles/dbt-databricks/python/.gitkeep b/src/ci/airflow/profiles/dbt-databricks/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/ci/airflow/profiles/dbt-redshift/python/.gitkeep b/src/ci/airflow/profiles/dbt-redshift/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/ci/airflow/profiles/dbt-snowflake/python/.gitkeep b/src/ci/airflow/profiles/dbt-snowflake/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/ci/airflow/test_dags.py b/src/ci/airflow/test_dags.py new file mode 100644 index 00000000..ddcd626e --- /dev/null +++ b/src/ci/airflow/test_dags.py @@ -0,0 +1,142 @@ +import argparse +import ast +import os +import time +from pathlib import Path + +from airflow.models import DagBag + + +class DatacovesFunctionVisitor(ast.NodeVisitor): + def __init__(self, source_code): + self.in_task_function = False + self.variable_usage_outside_task = [] + self.source_code = source_code + + def visit_FunctionDef(self, node): + # Check if the function is decorated with @task + if any( + (isinstance(decorator, ast.Name) and decorator.id == "task") + or ( + isinstance(decorator, ast.Call) + and isinstance(decorator.func, ast.Name) + and decorator.func.id == "task" + ) + or ( + isinstance(decorator, ast.Attribute) + and 
isinstance(decorator.value, ast.Name) + and decorator.value.id == "task" + ) + for decorator in node.decorator_list + ): + self.in_task_function = True + self.generic_visit(node) + self.in_task_function = False + else: + self.generic_visit(node) + + def visit_Name(self, node): + if node.id == "Variable": + if not self.in_task_function: + self.variable_usage_outside_task.append((node.lineno, node.col_offset)) + self.generic_visit(node) + + +try: + DAG_FOLDER = Path(os.environ["AIRFLOW__CORE__DAGS_FOLDER"]) +except KeyError: + raise Exception("AIRFLOW__CORE__DAGS_FOLDER environment variable not set.") + +parser = argparse.ArgumentParser(description="Test Airflow DAGs.") +parser.add_argument( + "--dag-loadtime-threshold", + type=int, + default=1, + help="Threshold for DAG loading time in seconds.", +) +parser.add_argument( + "--check-variable-usage", + action="store_true", + default=False, + help="Check Variable usage outside @task decorated functions.", +) +parser.add_argument( + "--write-output", + action="store_true", + default=False, + help="Write the output to a file.", +) +parser.add_argument( + "--filename", + type=str, + default="test_dags_results.md", + help="Filename of the output file.", +) +args = parser.parse_args() + +DAG_LOADTIME_THRESHOLD = args.dag_loadtime_threshold +CHECK_VARIABLE_USAGE = args.check_variable_usage +WRITE_OUTPUT = args.write_output +OUTPUT_FILENAME = args.filename +DAGBAG = DagBag() + +dag_counter = 0 +outputs = [] +dag_files = [ + f + for f in DAG_FOLDER.rglob("*.py") + if f.is_file() and f.suffix == ".py" and "__init__.py" not in str(f) +] + + +def _detect_variable_usage_outside_taskflow(file_path): + with open(file_path, "r") as file: + source_code = file.read() + tree = ast.parse(source_code, filename=file_path) + + visitor = DatacovesFunctionVisitor(source_code) + visitor.visit(tree) + + return visitor.variable_usage_outside_task + + +for dag_file in dag_files: + print(f"Loading {dag_file}") + if CHECK_VARIABLE_USAGE: + # Check for Variable usage outside @task decorated functions + variable_usage_outside_task = _detect_variable_usage_outside_taskflow(dag_file) + if variable_usage_outside_task: + outputs.append( + f"{dag_file} has Variable usage outside @task decorated functions at: " + f"{variable_usage_outside_task}" + ) + start = time.time() + try: + dag = DAGBAG.process_file(dag_file.absolute().as_posix(), only_if_updated=True) + except ImportError as e: + outputs.append(f"{dag_file} has an error: {e}") + except KeyError: + # KeyErrors at DagBag loading are due to missing Variables + # this is expected in CI + pass + end = time.time() + # Print a warning if the DAG took too long to load + if end - start > DAG_LOADTIME_THRESHOLD: + outputs.append(f"{dag_file} took {end - start:.5f} seconds to load.") + dag_counter += 1 + +print(f"Parsed {dag_counter} DAGs.") + + +if outputs: + print("Warnings found") + print("\n".join(outputs)) +if WRITE_OUTPUT: + with open(OUTPUT_FILENAME, "w") as file: + if outputs: + file.write("⚠️ **Test Warnings Detected:**\n\n") + for output in outputs: + file.write(f"- {output}\n") + else: + file.write("No warnings found.") + print(f"Results written to {os.getcwd()}/{OUTPUT_FILENAME}") diff --git a/src/ci/basic/Dockerfile b/src/ci/basic/Dockerfile new file mode 100644 index 00000000..6779477f --- /dev/null +++ b/src/ci/basic/Dockerfile @@ -0,0 +1,67 @@ +FROM python:3.10 AS base +LABEL com.datacoves.from=python:3.10 +LABEL com.datacoves.library.ci-basic.psycopg2-binary=2.9.9 +LABEL 
com.datacoves.library.ci-basic.dbt-redshift=1.9.0 +LABEL com.datacoves.library.ci-basic.dbt-databricks=1.9.0 +LABEL com.datacoves.library.ci-basic.dbt-snowflake=1.9.0 +LABEL com.datacoves.library.ci-basic.snowflake-connector-python[pandas]=3.14.0 +LABEL com.datacoves.library.ci-basic.snowflake-snowpark-python=1.25.0 +LABEL com.datacoves.library.ci-basic.dbt-bigquery=1.9.0 +LABEL com.datacoves.library.ci-basic.acryl-datahub=0.15.0.5 +LABEL com.datacoves.library.ci-basic.dbt-core=1.9.0 +LABEL com.datacoves.library.ci-basic.protobuf=5.29.3 +LABEL com.datacoves.library.ci-basic.dbt-coves=1.9.5 +LABEL com.datacoves.library.ci-basic.git+https://gitlab.com/datacoves/permifrost.git=v0.15.6 +LABEL com.datacoves.library.ci-basic.pre-commit=3.7.1 +LABEL com.datacoves.library.ci-basic.PyYAML=6.0.2 +LABEL com.datacoves.library.ci-basic.shandy-sqlfmt[jinjafmt]=0.26.0 +LABEL com.datacoves.library.ci-basic.sqlfluff=3.1.1 +LABEL com.datacoves.library.ci-basic.sqlfluff-templater-dbt=3.1.1 +LABEL com.datacoves.library.ci-basic.rich=14.0.0 +LABEL com.datacoves.library.ci-basic.kubernetes=31.0.0 +LABEL com.datacoves.library.ci-basic.uv=0.4.30 +LABEL com.datacoves.library.ci-basic.ruff=0.8.3 +LABEL com.datacoves.library.ci-basic.snowflake-cli=3.7.1 +LABEL com.datacoves.library.ci-basic.certifi=2025.1.31 + + +RUN apt-get update \ + && apt-get install -y --no-install-recommends \ + git \ + g++ \ + rsync \ + awscli \ + && apt-get autoremove -yqq --purge \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* \ + && pip install uv + +ENV PYTHONIOENCODING=utf-8 +ENV LANG C.UTF-8 + +WORKDIR /usr/app +VOLUME /usr/app + +# copy script that changes default app name on dbt adapters +COPY set_adapters_app.sh ./ + +FROM base AS dbt-snowflake + +COPY profiles/dbt-snowflake ./ +RUN uv pip install --system -r dbt-snowflake.txt \ + && ./set_adapters_app.sh all /usr/local/lib + +FROM base AS dbt-redshift + +COPY profiles/dbt-redshift ./ +RUN uv pip install --system -r dbt-redshift.txt && ./set_adapters_app.sh postgres /usr/local/lib + +FROM base AS dbt-bigquery + +COPY profiles/dbt-bigquery ./ +RUN uv pip install --system -r dbt-bigquery.txt && ./set_adapters_app.sh bigquery /usr/local/lib --skip-validation + +FROM base AS dbt-databricks + +COPY profiles/dbt-databricks ./ +RUN uv pip install --system -r dbt-databricks.txt && ./set_adapters_app.sh databricks /usr/local/lib --skip-validation diff --git a/src/ci/basic/profiles/dbt-bigquery/python/.gitkeep b/src/ci/basic/profiles/dbt-bigquery/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/ci/basic/profiles/dbt-databricks/python/.gitkeep b/src/ci/basic/profiles/dbt-databricks/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/ci/basic/profiles/dbt-redshift/python/.gitkeep b/src/ci/basic/profiles/dbt-redshift/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/ci/basic/profiles/dbt-snowflake/extensions/snowflake.snowflake-vsc-0.6.2.vsix b/src/ci/basic/profiles/dbt-snowflake/extensions/snowflake.snowflake-vsc-0.6.2.vsix new file mode 100644 index 00000000..fdc908aa Binary files /dev/null and b/src/ci/basic/profiles/dbt-snowflake/extensions/snowflake.snowflake-vsc-0.6.2.vsix differ diff --git a/src/ci/basic/profiles/dbt-snowflake/python/.gitkeep b/src/ci/basic/profiles/dbt-snowflake/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/ci/multiarch/Dockerfile b/src/ci/multiarch/Dockerfile new file mode 100644 index 00000000..35ea32fa --- /dev/null +++ 
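The com.datacoves.library.* labels keep the pinned versions inspectable after the image is built. A small sketch using the Docker SDK for Python (assumes the SDK is installed and that a locally built tag exists; the tag below is hypothetical):

import docker

client = docker.from_env()
image = client.images.get("datacoves/ci-basic-dbt-snowflake:latest")  # hypothetical tag

for key, value in sorted(image.labels.items()):
    if key.startswith("com.datacoves.library."):
        print(key, "=", value)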
b/src/ci/multiarch/Dockerfile @@ -0,0 +1,15 @@ +FROM docker:20 + +ARG BUILDX_URL=https://github.com/docker/buildx/releases/download/v0.10.4/buildx-v0.10.4.linux-amd64 + +# Enable experimental features in Docker client +RUN mkdir -p $HOME/.docker \ + && echo -e '{\n "experimental": "enabled"\n}' | tee $HOME/.docker/config.json \ + && mkdir -p $HOME/.docker/cli-plugins/ \ + && wget -O $HOME/.docker/cli-plugins/docker-buildx $BUILDX_URL \ + && chmod a+x $HOME/.docker/cli-plugins/docker-buildx \ + && apk add --update py-pip python3 gcc curl git bash make go jq \ + && pip install uv + +COPY requirements.txt . +RUN pip install -r requirements.txt diff --git a/src/code-server/code-server/99-datacoves-init b/src/code-server/code-server/99-datacoves-init new file mode 100755 index 00000000..9e53101c --- /dev/null +++ b/src/code-server/code-server/99-datacoves-init @@ -0,0 +1,50 @@ +#!/usr/bin/with-contenv bash + +# Directories +SCRIPTS_DIR="/opt/datacoves/custom-cont-init.d" +SERVICES_DIR="/opt/datacoves/custom-services.d" + +# Remove all existing custom services before continuing to ensure +# we aren't running anything the user may have removed +if [ -n "$(/bin/ls -A /etc/services.d/custom-service-* 2>/dev/null)" ]; then + echo "[custom-init] removing existing custom services..." + rm -rf /etc/services.d/custom-service-* +fi + +# Make sure custom init directory exists and has files in it +if ([ -e "${SCRIPTS_DIR}" ] && \ + [ -n "$(/bin/ls -A ${SCRIPTS_DIR} 2>/dev/null)" ]) || \ + ([ -e "${SERVICES_DIR}" ] && \ + [ -n "$(/bin/ls -A ${SERVICES_DIR} 2>/dev/null)" ]); then + if [ -n "$(/bin/ls -A ${SCRIPTS_DIR} 2>/dev/null)" ]; then + echo "[custom-init] files found in ${SCRIPTS_DIR} executing" + for SCRIPT in ${SCRIPTS_DIR}/*; do + NAME="$(basename "${SCRIPT}")" + if [ -f "${SCRIPT}" ]; then + echo "[custom-init] ${NAME}: executing..." + # perl interprets shebang and uses such binary to run the script (bash/python) + /usr/bin/perl ${SCRIPT} + echo "[custom-init] ${NAME}: exited $?" + elif [ ! -f "${SCRIPT}" ]; then + echo "[custom-init] ${NAME}: is not a file" + fi + done + fi + if [ -n "$(/bin/ls -A ${SERVICES_DIR} 2>/dev/null)" ]; then + echo "[custom-init] service files found in ${SERVICES_DIR}" + for SERVICE in ${SERVICES_DIR}/*; do + NAME="$(basename "${SERVICE}")" + if [ -d "${SERVICE}" ]; then + echo "[custom-init] ${NAME}: service detected, copying..." + mkdir -p /etc/services.d/custom-service-${NAME}/ + cp -R ${SERVICE}/* /etc/services.d/custom-service-${NAME}/ + chmod +x /etc/services.d/custom-service-${NAME}/run + echo "[custom-init] ${NAME}: copied" + elif [ ! -d "${SERVICE}" ]; then + echo "[custom-init] ${NAME}: is not a directory" + fi + done + fi +else + echo "[custom-init] no custom files found exiting..." 
+fi \ No newline at end of file diff --git a/src/code-server/code-server/Dockerfile b/src/code-server/code-server/Dockerfile new file mode 100644 index 00000000..4952a017 --- /dev/null +++ b/src/code-server/code-server/Dockerfile @@ -0,0 +1,105 @@ +FROM linuxserver/code-server:4.91.0 AS base + +LABEL com.datacoves.from=linuxserver/code-server:4.91.0 +LABEL com.datacoves.version.code-server=4.91.0 +LABEL com.datacoves.library.code-server.psycopg2-binary=2.9.9 +LABEL com.datacoves.library.code-server.dbt-redshift=1.9.0 +LABEL com.datacoves.library.code-server.dbt-databricks=1.9.0 +LABEL com.datacoves.library.code-server.dbt-snowflake=1.9.0 +LABEL com.datacoves.library.code-server.snowflake-connector-python[pandas]=3.14.0 +LABEL com.datacoves.library.code-server.snowflake-snowpark-python=1.25.0 +LABEL com.datacoves.library.code-server.dbt-bigquery=1.9.0 +LABEL com.datacoves.library.code-server.acryl-datahub=0.15.0.5 +LABEL com.datacoves.library.code-server.dbt-core=1.9.0 +LABEL com.datacoves.library.code-server.protobuf=5.29.3 +LABEL com.datacoves.library.code-server.dbt-coves=1.9.5 +LABEL com.datacoves.library.code-server.git+https://gitlab.com/datacoves/permifrost.git=v0.15.6 +LABEL com.datacoves.library.code-server.pre-commit=3.7.1 +LABEL com.datacoves.library.code-server.PyYAML=6.0.2 +LABEL com.datacoves.library.code-server.shandy-sqlfmt[jinjafmt]=0.26.0 +LABEL com.datacoves.library.code-server.sqlfluff=3.1.1 +LABEL com.datacoves.library.code-server.sqlfluff-templater-dbt=3.1.1 +LABEL com.datacoves.library.code-server.rich=14.0.0 +LABEL com.datacoves.library.code-server.kubernetes=31.0.0 +LABEL com.datacoves.library.code-server.uv=0.4.30 +LABEL com.datacoves.library.code-server.ruff=0.8.3 +LABEL com.datacoves.library.code-server.snowflake-cli=3.7.1 +LABEL com.datacoves.library.code-server.certifi=2025.1.31 +LABEL com.datacoves.extension.code-server.ms-python.python=2024.14.1 +LABEL com.datacoves.extension.code-server.dorzey.vscode-sqlfluff=3.2.0 +LABEL com.datacoves.extension.code-server.robertostermann.better-status-bar=1.0.9 +LABEL com.datacoves.extension.code-server.janisdd.edit-csv=0.10.0 +LABEL com.datacoves.extension.code-server.redhat.vscode-yaml=1.15.0 +LABEL com.datacoves.extension.code-server.samuelcolvin.jinjahtml=0.20.0 +LABEL com.datacoves.extension.code-server.datacoves.datacoves-power-user=0.10.1 +LABEL com.datacoves.extension.code-server.sleistner.vscode-fileutils=3.10.3 +LABEL com.datacoves.extension.code-server.streetsidesoftware.code-spell-checker=3.0.1 +LABEL com.datacoves.extension.code-server.snowflake.snowflake-vsc=1.10.5 +LABEL com.datacoves.extension.code-server.mtxr.sqltools=0.28.3 +LABEL com.datacoves.extension.code-server.mtxr.sqltools-driver-pg=0.5.4 +LABEL com.datacoves.extension.code-server.charliermarsh.ruff=2024.56.0 +LABEL com.datacoves.extension.code-server.mechatroner.rainbow-csv=3.3 +LABEL com.datacoves.extension.code-server.datacoves.datacoves-copilot=0.1.3 + +# Install required packages and Python 3.10 +RUN apt-get update && \ + apt-get install -y unzip software-properties-common gcc vim git-secret \ + python3-distutils python3-dev python3-pip python3.10-venv libpq-dev --no-install-recommends && \ + rm -rf /var/lib/apt/lists/* + +# Update symlinks +RUN ln -s /usr/bin/python3.10 /usr/bin/python + +# Copy files +COPY run /etc/s6-overlay/s6-rc.d/svc-code-server/run +COPY 99-datacoves-init /etc/cont-init.d/99-datacoves-init +COPY datacoves /opt/datacoves +ADD bin /usr/bin/ + +# Directory setup +RUN mkdir -p /config/.local && chown -R abc:abc /config + 
+FROM base AS dbt-snowflake + +COPY profiles/dbt-snowflake/python /opt/datacoves/profile/python/ +COPY profiles/dbt-snowflake/extensions /opt/datacoves/profile/extensions/ +COPY profiles/common-dbt/extensions /opt/datacoves/profile/extensions/ + +RUN sudo -u abc bash -c "pip install -r /opt/datacoves/profile/python/dbt-snowflake.txt --no-warn-script-location" \ + && /opt/datacoves/set_adapters_app.sh all /config/.local/lib \ + && mv /config/.local /opt/datacoves/profile/python/local + +FROM base AS dbt-redshift + +COPY profiles/dbt-redshift/python /opt/datacoves/profile/python/ +COPY profiles/dbt-redshift/extensions /opt/datacoves/profile/extensions/ +COPY profiles/common-dbt/extensions /opt/datacoves/profile/extensions/ + +RUN sudo -u abc bash -c "pip install -r /opt/datacoves/profile/python/dbt-redshift.txt --no-warn-script-location" \ + && /opt/datacoves/set_adapters_app.sh all /config/.local/lib \ + && mv /config/.local /opt/datacoves/profile/python/local + +FROM base AS dbt-bigquery + +COPY profiles/dbt-bigquery/python /opt/datacoves/profile/python/ +# COPY profiles/dbt-bigquery/extensions /opt/datacoves/profile/extensions/ +COPY profiles/common-dbt/extensions /opt/datacoves/profile/extensions/ + +RUN sudo -u abc bash -c "pip install -r /opt/datacoves/profile/python/dbt-bigquery.txt --no-warn-script-location" \ + && /opt/datacoves/set_adapters_app.sh bigquery /config/.local/lib --skip-validation \ + && mv /config/.local /opt/datacoves/profile/python/local + +FROM base AS dbt-databricks + +COPY profiles/dbt-databricks/python /opt/datacoves/profile/python/ +# COPY profiles/dbt-databricks/extensions /opt/datacoves/profile/extensions/ +COPY profiles/common-dbt/extensions /opt/datacoves/profile/extensions/ + +# FIXME: workaround by adding pip install -U pip==21.3.1 to avoid error: +# https://github.com/pypa/pip/issues/10851 +RUN sudo -u abc bash -c "pip install -U pip==21.3.1 && pip install -r /opt/datacoves/profile/python/dbt-databricks.txt --no-warn-script-location" \ + && /opt/datacoves/set_adapters_app.sh databricks /config/.local/lib --skip-validation \ + && /opt/datacoves/set_adapters_app.sh spark /config/.local/lib --skip-validation \ + && mv /config/.local /opt/datacoves/profile/python/local + + diff --git a/src/code-server/code-server/bin/datacoves b/src/code-server/code-server/bin/datacoves new file mode 100755 index 00000000..a8801853 --- /dev/null +++ b/src/code-server/code-server/bin/datacoves @@ -0,0 +1,580 @@ +#!/bin/env python3 +# +# Datacoves Command Line Tool for interacting with Datacoves specific +# features. 
+ +import json +import os +import shlex +import socket +import sqlite3 +import sys +from argparse import ArgumentParser +from pathlib import Path + +import questionary +import requests +import yaml +from kubernetes import client, config +from kubernetes.stream import stream +from rich.console import Console +from rich.progress import Progress + +# Disable requests warnings +requests.packages.urllib3.disable_warnings() + +console = Console() + + +MY_AIRFLOW_DB_PATH = "/config/local-airflow/db/airflow.db" + + +class DbtProfileToAirflow: + """This translates a DBT profile into an Airflow connection DB row.""" + + @classmethod + def method_name(cls, type: str) -> str: + """Generates the method name for the given Airflow connection type""" + + return f"translate_{type}" + + @classmethod + def is_supported(cls, type: str) -> bool: + """Is Airflow connection type 'type' supported by this class?""" + + return hasattr(cls, cls.method_name(type)) + + @classmethod + def translate(cls, profile: dict) -> dict: + """This does the actual translation of a profile to an airflow + Connection object dictionary""" + + method = cls.method_name(profile.get("type", "")) + + if not hasattr(cls, method): + raise RuntimeError( + f"Profile of type {profile.get('type', '')} is not yet " "supported." + ) + + return getattr(cls, method)(profile) + + @classmethod + def translate_snowflake(cls, profile: dict) -> dict: + """Translates a snowflake dbt profile to an airflow connection""" + + extra = {} + + # Copy these into extra if they are set + for field in ["account", "warehouse", "database", "role"]: + if field in profile: + extra[field] = profile[field] + + ret = { + "schema": profile.get("schema"), + "login": profile.get("user"), + "host": profile.get("host"), + "port": profile.get("port"), + "password": profile.get("password"), + } + + if "private_key_path" in profile: + with open(profile["private_key_path"], "r") as f: + extra["private_key_content"] = f.read() + del ret["password"] + + if profile.get("authenticator") == "username_password_mfa": + extra["mfa_protected"] = True + + ret["extra"] = json.dumps(extra) + return ret + + @classmethod + def translate_redshift(cls, profile: dict) -> dict: + return { + "schema": profile.get("dbname"), + "login": profile.get("user"), + "host": profile.get("host"), + "port": profile.get("port"), + "password": profile.get("password"), + "extra": json.dumps( + { + "schema": profile.get("schema", ""), + } + ), + } + + @classmethod + def translate_databricks(cls, profile: dict) -> dict: + return { + "schema": profile.get("schema"), + "host": profile.get("host"), + "password": profile.get("token"), + "extra": json.dumps( + { + "token": profile.get("token"), + "http_path": profile.get("http_path"), + } + ), + } + + @classmethod + def translate_bigquery(cls, profile: dict) -> dict: + return { + "extra": json.dumps( + { + "project": profile.get("project"), + "dataset": profile.get("dataset"), + "keyfile_dict": json.dumps(profile.get("keyfile_json")), + } + ), + } + + +def get_user_input() -> str: + """Fetches multi-line user input""" + + ret = "" + + try: + while True: + next_line = input() + + if ret == "": # Empty, don't inject a \n + ret = next_line + + else: + ret += "\n" + next_line + + except EOFError: + return ret + + +def quick_query(*arg, **argv): + """This runs a 'quick query', which is to say, we open My Airflow's + DB, do the query, and close it right away. This avoids lingering with + the DB handle open which locks up My Airflow and can have other + various problems. 
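To make the profile-to-connection mapping concrete: given an illustrative Snowflake output from profiles.yml, translate() returns the column values that are later inserted into My Airflow's connection table, with account, warehouse, database and role packed into the JSON extra (and the password dropped when a private_key_path is present).

profile = {
    "type": "snowflake",
    "account": "xy12345",
    "user": "dev_user",
    "password": "s3cret",
    "warehouse": "dev_wh",
    "database": "analytics",
    "role": "transformer",
    "schema": "dbt_dev",
}

conn = DbtProfileToAirflow.translate(profile)
# conn["login"] == "dev_user", conn["schema"] == "dbt_dev"
# conn["extra"] is a JSON string carrying account/warehouse/database/role.
print(conn["extra"])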
+ + All parameters are passed to the cursor.execute method. If you + provide the parameter 'and_then', it will run that method on the + cursor and return the value. Otherwise, this returns None. + """ + + # Try to open our database + db = sqlite3.connect(MY_AIRFLOW_DB_PATH) + cursor = db.cursor() + + and_then = False + + if "and_then" in argv: + and_then = argv["and_then"] + del argv["and_then"] + + cursor.execute(*arg, **argv) + + ret = None + + if and_then: + ret = getattr(cursor, and_then)() + + db.commit() + cursor.close() + db.close() + + return ret + + +def airflow_import(args): + """Import variables from airflow. 'args' is the paramaters from the + arg parser; right now it just understands 'quiet' which will avoid + prompting for missing secrets. + """ + + quiet = args.quiet + + import_variables = not args.connections + import_connections = not args.variables + + if not import_variables and not import_connections: + print("Hmm... nothing to do!") + sys.exit(0) + + # Make sure local airflow database exists. + if not os.path.isfile(MY_AIRFLOW_DB_PATH): + print( + "You must start My Airflow and let it fully start up before " + "you can import Team Airflow items into it." + ) + sys.exit(1) + + ### + ### BASIC COMMAND VALIDATION + ### + + # Make sure required environment variables are set + token = os.environ.get("DATACOVES__SECRETS_TOKEN") + url = os.environ.get("DATACOVES__SECRETS_URL") + slug = os.environ.get("DATACOVES__ENVIRONMENT_SLUG") + + if not token: + print("DATACOVES__SECRETS_TOKEN environment variable is missing.") + sys.exit(1) + + if not url: + print("DATACOVES__SECRETS_URL environment variable is missing.") + sys.exit(1) + + if not slug: + print( + "DC_CUSTOM__DATACOVES__ENVIRONMENT_SLUG environment variable " "is missing." + ) + sys.exit(1) + + if url[-1] != "/": + url += "/" + + ### + ### LOAD LOCAL DBT PROFILES - only if importing connections + ### + + # Map profile type to list of potential profiles. + profiles = {} + + if import_connections: + # Try to load profiles.yml, only if loading connections. + profiles_path = ( + Path(os.environ.get("DBT_PROFILES_DIR", f"{os.environ.get('HOME')}/.dbt")) + / "profiles.yml" + ) + + if not profiles_path.exists(): + print( + "Missing user database connection: My Airflow uses your user's " + "own credentials to operate in the development environment. " + "Please configure your database connection in your Datacoves " + "User Settings and try again." + ) + sys.exit(1) + + with open(str(profiles_path), "rt") as input: + profiles_file = yaml.safe_load(input) + + if not isinstance(profiles_file, dict): + print( + "ERROR: Could not parse your profiles.yml file. This is " + "rather unusual - contact Datacoves support and provide this " + "error message." + ) + sys.exit(1) + + try: + for name, details in ( + next(iter(profiles_file.values())).get("outputs", {}).items() + ): + type = details.get("type", "") + + if type not in profiles: + profiles[type] = [] + + profiles[type].append(name) + + except: + # There's a number of things that can go wrong here. + # All are unparsablae profiles.yml file + print( + "WARNING: Could not parse your profiles.yml file. This is " + "an unusual error - contact Datacoves support and provide " + "this error message." 
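For reference, the profiles.yml parsing above reduces the first profile in the file to a map of adapter type to output names; with an illustrative file containing two Snowflake outputs the result is {"snowflake": ["dev", "prod"]}:

profiles_file = {
    "default": {
        "target": "dev",
        "outputs": {
            "dev":  {"type": "snowflake", "account": "xy12345"},
            "prod": {"type": "snowflake", "account": "xy12345"},
        },
    }
}

profiles = {}
for name, details in next(iter(profiles_file.values())).get("outputs", {}).items():
    profiles.setdefault(details.get("type", ""), []).append(name)

assert profiles == {"snowflake": ["dev", "prod"]}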
+ ) + sys.exit(1) + + ### + ### LOAD AIRFLOW SECRETS + ### + + # Try to get the airflow environment from Datacoves API + result = requests.get( + f"{url}api/v1/secrets-fetch/{slug}", + headers={"Authorization": f"Token {token}"}, + verify=False, + ) + + if result.status_code != 200: + print(f"Got status {result.status_code}: {result.text}") + sys.exit(1) + + result = result.json() + + # Start the import process + if import_variables: + if not quiet: + print("Importing variables ...") + + for variable in result["variables"]: + # Does the variable exist? + if ( + quick_query( + "select id from variable where key=?", + [variable["key"]], + and_then="fetchone", + ) + is not None + ): + if not quiet: + print(f"Variable {variable['key']} already exists, skipping...") + + continue + + # Is the variable redacted? If so, we need to make a place holder + # or set it. + if variable["is_redacted"]: + if quiet: + variable["value"] = "" + + else: + print(f"Enter a value for secret variable {variable['key']}.") + print("Use CTRL-D to finish entering the value.") + variable["value"] = get_user_input() + + elif not quiet: + print(f"Importing {variable['key']}...") + + quick_query( + "insert into variable (key, val, description) values (?, ?, ?)", + [variable["key"], variable["value"], variable["description"]], + ) + + if import_connections: + if not quiet: + print("Importing connections...") + + for connection in result["connections"]: + # Does the connection exist? + if ( + quick_query( + "select id from connection where conn_id=?", + [connection["connection_id"]], + and_then="fetchone", + ) + is not None + ): + if not quiet: + print( + f"Connection {connection['connection_id']} already exists, " + "skipping..." + ) + + continue + + # Correct the type + type = connection["conn_type"] + + if "bigquery" in type: + type = "bigquery" + + if type in profiles: + if not quiet and len(profiles[type]) > 1: + print( + "Select a profile to map to connection ID: " + + connection["connection_id"] + ) + + profile = questionary.select( + "Choose Profile", choices=profiles[type] + ).ask() + + if profile is None: + # User hit CTRL-C + sys.exit(0) + + else: + profile = profiles[type][0] + + if not quiet: + print( + f"Mapping connection ID {connection['connection_id']} " + f"to profile {profile} because you have only one. " + "You can edit this in My Airflow if you need to." + ) + + else: + profile = None + + if DbtProfileToAirflow.is_supported(type): + print( + "We could not map connection ID " + + connection["connection_id"] + + "to any of your User Database Connections because " + "you do not have one of type " + + connection["conn_type"] + + ". You will need to add this one manually." + ) + + else: + print( + "Skipping connection ID " + + connection["connection_id"] + + " because it is not supported by the import process." + + " You will need to add this one manually." + ) + + continue + + # Copy the profile into My Airflow. 
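The import logic above relies only on a handful of fields from the secrets-fetch response; an illustrative payload (shape inferred from this script, not a documented API contract):

result = {
    "variables": [
        {
            "key": "dbt_run_date",
            "value": "2024-01-01",
            "description": "Illustrative, non-secret variable",
            "is_redacted": False,
        },
    ],
    "connections": [
        {
            "connection_id": "main_warehouse",
            "conn_type": "snowflake",
            "description": "Illustrative team connection",
        },
    ],
}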
+ new_conn = DbtProfileToAirflow.translate( + next(iter(profiles_file.values())).get("outputs").get(profile) + ) + + quick_query( + "insert into connection " + "(conn_id, conn_type, description, host, schema, login, " + "password, port, extra) values (?, ?, ?, ?, ?, ?, ?, ?, ?)", + [ + connection["connection_id"], + connection["conn_type"], + connection["description"], + new_conn.get("host", ""), + new_conn.get("schema", ""), + new_conn.get("login", ""), + new_conn.get("password", ""), + new_conn.get("port", ""), + new_conn.get("extra", ""), + ], + ) + + if not quiet: + print("Done!") + + +def local_airflow_pytest(args): + """Handle to run pytest in local airflow using kubectl""" + + # SHELL = "/bin/bash" + ENV_SLUG = os.environ.get("DATACOVES__ENVIRONMENT_SLUG") + NAMESPACE = f"dcw-{ENV_SLUG}" + POD_NAME = socket.gethostname() + CONTAINER_NAME = "local-airflow" + task_pytest = None + + with Progress() as progress: + try: + # Set up the API client for the pod's exec + config.load_incluster_config() + v1 = client.CoreV1Api() + + # Valid if the container exists in the pod + pod = v1.read_namespaced_pod(POD_NAME, NAMESPACE) + container_names = [container.name for container in pod.spec.containers] + + if CONTAINER_NAME not in container_names: + raise ValueError( + "You must start My Airflow and let it fully start up first." + ) + + # Execute command in the pod + task_pytest = progress.add_task("Running...", total=None) + + command = [ + "/bin/bash", + "-c", + "cd /opt/airflow/dags/repo && " + "pytest " + " ".join([shlex.quote(x) for x in args.arguments]), + ] + + exec_response = stream( + v1.connect_get_namespaced_pod_exec, + name=POD_NAME, + namespace=NAMESPACE, + container=CONTAINER_NAME, + command=command, + stderr=True, + stdin=False, + stdout=True, + tty=False, + ) + + sys.stdout.write(exec_response) + except client.exceptions.ApiException as e: + console.print(e.reason) + except Exception as e: + console.print(e) + finally: + if task_pytest is not None: + progress.update(task_pytest, visible=False) + + +def main(): + """Handle command line parsing and routing to subcommands""" + + parser = ArgumentParser( + prog="datacoves", + description="Datacoves command line control interface.", + ) + + subparsers = parser.add_subparsers(help="Available Subcommands", dest="subcommand") + + # Subparsers for Airflow Local + airflow_local_parser = subparsers.add_parser( + "my", + help="Execute commands for my Airflow", + ) + airflow_local_subparser = airflow_local_parser.add_subparsers( + help="Available Subcommands" + ) + import_parser = airflow_local_subparser.add_parser( + "import", + help="Import connections and variables from Team Airflow", + ) + import_parser.set_defaults(func=airflow_import) + import_parser.add_argument( + "-q", + "--quiet", + required=False, + help="Don't interactively ask for secrets.", + action="store_true", + ) + + import_parser.add_argument( + "-v", + "--variables", + required=False, + help="Only import varaibles", + action="store_true", + ) + + import_parser.add_argument( + "-c", + "--connections", + required=False, + help="Only import connections", + action="store_true", + ) + + pytest_parser = airflow_local_subparser.add_parser( + "pytest", + help="Run test in Airflow", + ) + pytest_parser.add_argument( + "arguments", + nargs="*", + help="Arguments for pytest; typically you will want to prefix this " + "with -- in order to allow passing of flag-like parameters. For " + "instance, '-- -h' to get help text. 
You should, at a minimum, " + "provide a path to a test file or folder such as: " + "-- /config/workspace/orchestrate/tests/my_test.py", + ) + pytest_parser.set_defaults(func=local_airflow_pytest) + + # Parser arguments + args = parser.parse_args() + + # Execute the function for the subcommand + if hasattr(args, "func"): + args.func(args) + else: + print("Subcommand required: See -h for help") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/src/code-server/code-server/datacoves/custom-cont-init.d/01-python-libs.sh b/src/code-server/code-server/datacoves/custom-cont-init.d/01-python-libs.sh new file mode 100755 index 00000000..6f859288 --- /dev/null +++ b/src/code-server/code-server/datacoves/custom-cont-init.d/01-python-libs.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +set -e + +echo "[datacoves setup] Installing python libraries..." + +PYTHON_DIR=/opt/datacoves/profile/python + +if [ -d "${PYTHON_DIR}" ]; then + + rm -rf /config/.local + mv "$PYTHON_DIR"/local /config/.local + # # Replacing default app name in python libraries. + # /opt/datacoves/set_adapters_app.sh all /config/.local/lib + # /opt/datacoves/set_adapters_app.sh bigquery /config/.local/lib --skip-validation + # /opt/datacoves/set_adapters_app.sh databricks /config/.local/lib --skip-validation + # /opt/datacoves/set_adapters_app.sh spark /config/.local/lib --skip-validation + chown -R abc:abc /config/.local +else + + echo "[datacoves setup] No python libraries found in profile." + +fi diff --git a/src/code-server/code-server/datacoves/custom-cont-init.d/10-ssh-keys.py b/src/code-server/code-server/datacoves/custom-cont-init.d/10-ssh-keys.py new file mode 100755 index 00000000..bb0e9099 --- /dev/null +++ b/src/code-server/code-server/datacoves/custom-cont-init.d/10-ssh-keys.py @@ -0,0 +1,37 @@ +#!/usr/bin/python + +import os +import shutil +import json +from pathlib import Path + + +def generate_keys(): + """Generate ssh keys""" + + output_folder = Path("/config/.ssh") + keys_file = Path("/opt/datacoves/user/ssh_keys.json") + + if keys_file.exists(): + shutil.rmtree(output_folder, ignore_errors=True) + output_folder.mkdir(parents=True, exist_ok=True) + os.chown(output_folder, 1000, 1000) + data = json.load(open(keys_file)) + for key in data: + private_path = output_folder / f"id_{key['key_type']}" + print(f"Generating {private_path}...") + with open(private_path, "w") as private_file: + private_file.write(key["private"]) + + public_path = output_folder / f"id_{key['key_type']}.pub" + print(f"Generating {public_path}...") + with open(public_path, "w") as public_file: + public_file.write(key["public"]) + + os.chown(private_path, 1000, 1000) + os.chmod(private_path, 0o600) + os.chown(public_path, 1000, 1000) + + +if __name__ == "__main__": + generate_keys() diff --git a/src/code-server/code-server/datacoves/custom-cont-init.d/11-ssl-keys.py b/src/code-server/code-server/datacoves/custom-cont-init.d/11-ssl-keys.py new file mode 100755 index 00000000..c78a78e3 --- /dev/null +++ b/src/code-server/code-server/datacoves/custom-cont-init.d/11-ssl-keys.py @@ -0,0 +1,37 @@ +#!/usr/bin/python + +import os +import shutil +import json +from pathlib import Path + + +def generate_keys(): + """Generate ssl keys""" + + output_folder = Path("/config/.ssl") + keys_file = Path("/opt/datacoves/user/ssl_keys.json") + + if keys_file.exists(): + shutil.rmtree(output_folder, ignore_errors=True) + output_folder.mkdir(parents=True, exist_ok=True) + os.chown(output_folder, 1000, 1000) + data = json.load(open(keys_file)) + for key in data: + 
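10-ssh-keys.py reads three fields per entry from ssh_keys.json; an illustrative file is shown below as Python data (values truncated, shape inferred from the script; real keys are provisioned by the platform).

# Illustrative contents of /opt/datacoves/user/ssh_keys.json:
ssh_keys = [
    {
        "key_type": "ed25519",
        "private": "-----BEGIN OPENSSH PRIVATE KEY-----\n...\n-----END OPENSSH PRIVATE KEY-----\n",
        "public": "ssh-ed25519 AAAA... user@example.com",
    }
]
# Each entry is written to /config/.ssh/id_<key_type> (chmod 600) plus id_<key_type>.pub.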
private_path = output_folder / f"{key['connection']}-private.pem" + print(f"Generating {private_path}...") + with open(private_path, "w") as private_file: + private_file.write(key["private"]) + + public_path = output_folder / f"{key['connection']}-public.pem" + print(f"Generating {public_path}...") + with open(public_path, "w") as public_file: + public_file.write(key["public"]) + + os.chown(private_path, 1000, 1000) + os.chmod(private_path, 0o600) + os.chown(public_path, 1000, 1000) + + +if __name__ == "__main__": + generate_keys() diff --git a/src/code-server/code-server/datacoves/custom-cont-init.d/20-extensions.sh b/src/code-server/code-server/datacoves/custom-cont-init.d/20-extensions.sh new file mode 100755 index 00000000..e878e4d4 --- /dev/null +++ b/src/code-server/code-server/datacoves/custom-cont-init.d/20-extensions.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +set -e + +echo "[datacoves setup] Installing extensions..." + +rm -rf /config/extensions-prep +rm -rf /config/extensions + +echo "Debug: Creating /config/extensions-prep" +mkdir -p /config/extensions-prep +mkdir -p /config/extensions + +cd /opt/datacoves/profile/extensions + +for filename in *.vsix; do + echo " - processing $filename..." + + rm -rf /tmp/extension + unzip "$filename" extension/* -d /tmp + + rm -rf /config/extensions-prep/"${filename/.vsix/}" + + echo "Debug: Moving /tmp/extension to /config/extensions-prep/${filename/.vsix/}" + mv /tmp/extension /config/extensions-prep/"${filename/.vsix/}" +done + +chown -R abc:abc /config/extensions-prep +chown -R abc:abc /config/extensions diff --git a/src/code-server/code-server/datacoves/custom-cont-init.d/50-git-clone.py b/src/code-server/code-server/datacoves/custom-cont-init.d/50-git-clone.py new file mode 100755 index 00000000..864fed6f --- /dev/null +++ b/src/code-server/code-server/datacoves/custom-cont-init.d/50-git-clone.py @@ -0,0 +1,103 @@ +#!/usr/bin/python + +import os +import shutil +import subprocess +import tempfile +from pathlib import Path +from urllib.parse import urlparse + + +def git_config(fullname, email): + """Generates /config/.gitconfig file with user and email""" + + git_config_path = Path("/config/.gitconfig") + if not git_config_path.exists(): + with open(git_config_path, "w") as target_file: + target_file.write( + f"""[user] + name = {fullname} + email = {email}""" + ) + os.chown(git_config_path, 1000, 1000) + + +def add_known_hosts(repo_url): + """Generates /config/.ssh/known_hosts file including repo ssh public key""" + + ssh_repo_url = f"ssh://{repo_url}" if "ssh://" not in repo_url else repo_url + url_parsed = urlparse(ssh_repo_url) + domain = url_parsed.hostname + command = ["ssh-keyscan", "-t", "rsa"] + try: + command += ["-p", str(url_parsed.port)] + except ValueError: + pass + output = subprocess.run(command + [domain], capture_output=True, text=True) + new_host = output.stdout + known_hosts_path = Path("/config/.ssh/known_hosts") + if not known_hosts_path.exists(): + known_hosts_path.parent.mkdir(parents=True, exist_ok=True) + known_hosts_path.touch(exist_ok=True) + os.chown(known_hosts_path, 1000, 1000) + + hosts = open(known_hosts_path, "r").read() + if domain not in hosts: + with open(known_hosts_path, "a") as file: + file.write(new_host) + + +def demote(user_uid, user_gid): + """Pass the function 'set_ids' to preexec_fn, rather than just calling + setuid and setgid. 
This will change the ids for that subprocess only""" + + def set_ids(): + os.setgid(user_gid) + os.setuid(user_uid) + + return set_ids + + +def _move_folder_contents(source_path, dest_path): + """Move everything from a source directory to a destination one""" + source_files = os.listdir(source_path) + for file in source_files: + shutil.move(os.path.join(source_path, file), os.path.join(dest_path, file)) + + +def _run_git_clone(url, path): + command = ["git", "clone", "--filter=blob:none", url, path] + subprocess.run(command, preexec_fn=demote(1000, 1000)) + + +def git_clone(repo_url): + """Clones git repo if workspace is empty""" + + workspace_path = Path("/config/workspace") + # If does not exist or is empty + if not workspace_path.exists() or not any(Path(workspace_path).iterdir()): + workspace_path.mkdir(parents=True, exist_ok=True) + os.system(f"chown -R abc:abc {workspace_path}") + _run_git_clone(repo_url, workspace_path) + else: + git_path = workspace_path / ".git" + if not git_path.exists(): + with tempfile.TemporaryDirectory() as tmp_dir: + _move_folder_contents(workspace_path, tmp_dir) + _run_git_clone(repo_url, workspace_path) + _move_folder_contents(tmp_dir, workspace_path) + os.system(f"chown -R abc:abc {workspace_path}") + + +if __name__ == "__main__": + repo_clone = os.environ.get("DATACOVES__REPOSITORY_CLONE", "false") + if repo_clone == "true": + repo_url = os.environ["DATACOVES__REPOSITORY_URL"] + fullname = os.environ["DATACOVES__USER_FULLNAME"] + email = os.environ["DATACOVES__USER_EMAIL"] + + git_config(fullname, email) + add_known_hosts(repo_url) + git_clone(repo_url) + else: + print("Repository cloning feature not enabled in profile.") diff --git a/src/code-server/code-server/datacoves/custom-cont-init.d/60-profile-files.py b/src/code-server/code-server/datacoves/custom-cont-init.d/60-profile-files.py new file mode 100755 index 00000000..c77db43a --- /dev/null +++ b/src/code-server/code-server/datacoves/custom-cont-init.d/60-profile-files.py @@ -0,0 +1,53 @@ +#!/usr/bin/python + +import json +import os +import stat +import subprocess +from pathlib import Path + + +def generate_files(): + """Generate profile files and executes them if configured so""" + + files_path = Path("/opt/datacoves/user/files.json") + cloned_repo = Path("/config/workspace/.git").exists() + + if files_path.exists(): + files = json.load(open(files_path)) + for file in files: + path_output = subprocess.run( + f"echo {file['mount_path']}", shell=True, capture_output=True + ) + mount_path = path_output.stdout.decode().replace("\n", "") + if not mount_path.startswith("/config/workspace") or cloned_repo: + mount_path = Path(mount_path) + if file["override"] or not mount_path.exists(): + mount_path.parent.mkdir(parents=True, exist_ok=True) + os.chown(mount_path.parent, 1000, 1000) + + with open(mount_path, "w") as target_file: + target_file.write(file["content"]) + os.chmod(mount_path, int(file["permissions"], 8)) + os.chown(mount_path, 1000, 1000) + if file["content"].startswith("#!"): + st = os.stat(mount_path) + os.chmod(mount_path, st.st_mode | stat.S_IEXEC) + + if file["execute"]: + print(f"Executing {mount_path}...") + output = subprocess.run( + ["/usr/bin/perl", mount_path], + env=os.environ, + cwd=mount_path.parent, + ) + if output.stderr: + print(output.stdout) + else: + print(f"Generating {mount_path}...") + else: + print(f"Skipping {mount_path} as workspace not initialited yet...") + + +if __name__ == "__main__": + generate_files() diff --git 
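60-profile-files.py expects each files.json entry to carry a mount path (shell-expanded), the file body, an octal permission string, and two flags; an illustrative entry (shape inferred from the script):

files = [
    {
        "mount_path": "$HOME/.dbt/profiles.yml",  # expanded via the shell before use
        "content": "default:\n  target: dev\n",
        "permissions": "600",                     # parsed with int(value, 8)
        "override": False,                        # False: only written when missing
        "execute": False,                         # True: run the file after writing it
    }
]
# Entries targeting /config/workspace are skipped until the repository has been cloned.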
a/src/code-server/code-server/datacoves/custom-cont-init.d/90-local-airflow.sh b/src/code-server/code-server/datacoves/custom-cont-init.d/90-local-airflow.sh new file mode 100755 index 00000000..00c14160 --- /dev/null +++ b/src/code-server/code-server/datacoves/custom-cont-init.d/90-local-airflow.sh @@ -0,0 +1,14 @@ +#!/bin/bash +LOCAL_AIRFLOW_DIR="/config/local-airflow" + +# Delete old logs if directory exists +if [ -d /config/local-airflow/logs ]; then + find /config/local-airflow/logs -name '*.log' -type f -mtime +30 -delete + find /config/local-airflow/logs -type d -empty -delete +fi + +# Make sure the local airflow directories are created and have the +# correct permissions. +mkdir -p "${LOCAL_AIRFLOW_DIR}/db" +mkdir -p "${LOCAL_AIRFLOW_DIR}/logs" +chown -R 1000:1000 "${LOCAL_AIRFLOW_DIR}" diff --git a/src/code-server/code-server/datacoves/custom-cont-init.d/91-clean-cahe.sh b/src/code-server/code-server/datacoves/custom-cont-init.d/91-clean-cahe.sh new file mode 100755 index 00000000..9042080b --- /dev/null +++ b/src/code-server/code-server/datacoves/custom-cont-init.d/91-clean-cahe.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +# Delete old UV cache +rm -rf /config/.cache/uv/ +rm -rf /config/data/CachedExtensionVSIXs/ diff --git a/src/code-server/code-server/datacoves/custom-services.d/extensions-installer/dependencies.d/svc-code-server b/src/code-server/code-server/datacoves/custom-services.d/extensions-installer/dependencies.d/svc-code-server new file mode 100644 index 00000000..e69de29b diff --git a/src/code-server/code-server/datacoves/custom-services.d/extensions-installer/run b/src/code-server/code-server/datacoves/custom-services.d/extensions-installer/run new file mode 100644 index 00000000..3774e814 --- /dev/null +++ b/src/code-server/code-server/datacoves/custom-services.d/extensions-installer/run @@ -0,0 +1,58 @@ +#!/usr/bin/python +import json +import pathlib +import shutil + +print("Moving extensions to /config/extensions...") +extensions_prep = pathlib.Path("/config/extensions-prep") +extensions = pathlib.Path("/config/extensions") + +if extensions_prep.exists(): + # Cleaning up extensions folder (can't delete it since it's being watched by VSCode) + for f in extensions.iterdir(): + if f.is_dir(): + shutil.rmtree(f, ignore_errors=True) + else: + f.unlink() + for f in extensions_prep.iterdir(): + shutil.move(f, f"{extensions}/") + + # Remove extensions-prep + shutil.rmtree(extensions_prep) + + ext_configs = [] + for f in extensions.glob("*/package.json"): + if f.is_file(): + package = json.load(open(f)) + try: + publisher = package["publisher"].lower() + name = package["name"].lower() + version = package["version"] + except: + print(f"Exception processing extension {f}") + raise + + ext_configs.append( + { + "identifier": {"id": f"{publisher}.{name}"}, + "version": version, + "location": { + "$mid": 1, + "fsPath": str(f.parent), + "path": str(f.parent), + "scheme": "file", + }, + "relativeLocation": f.parent.name, + } + ) + + extensions_json_path = extensions / "extensions.json" + with open(extensions_json_path, "w", encoding="utf-8") as f: + json.dump(ext_configs, f, ensure_ascii=False, indent=4) + shutil.chown(extensions_json_path, user="abc", group="abc") + + +from threading import Event + +# Sleep to infinity since this is a service +Event().wait() diff --git a/src/code-server/code-server/datacoves/download_extension.sh b/src/code-server/code-server/datacoves/download_extension.sh new file mode 100755 index 00000000..8b3bdf33 --- /dev/null +++ 
b/src/code-server/code-server/datacoves/download_extension.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash + +set -e + + +dl() { + curl --fail -JLO --compressed "$1" +} + +url="$1" + +dl "$url" && exit 0 + +echo 'Download failed, retrying in 2 seconds...' +sleep 2 +dl "$url" && exit 0 + +echo 'Download failed, retrying in 5 seconds...' +sleep 5 +dl "$url" && exit 0 + +echo 'Download failed, retrying in 10 seconds...' +sleep 10 +dl "$url" && exit 0 + +echo 'Download failed.' +exit 1 diff --git a/src/code-server/code-server/datacoves/profile/extensions/better-status-bar-1.0.9.vsix b/src/code-server/code-server/datacoves/profile/extensions/better-status-bar-1.0.9.vsix new file mode 100644 index 00000000..ff56f03e Binary files /dev/null and b/src/code-server/code-server/datacoves/profile/extensions/better-status-bar-1.0.9.vsix differ diff --git a/src/code-server/code-server/datacoves/profile/extensions/janisdd.vscode-edit-csv-0.10.0.vsix b/src/code-server/code-server/datacoves/profile/extensions/janisdd.vscode-edit-csv-0.10.0.vsix new file mode 100644 index 00000000..956cae1d Binary files /dev/null and b/src/code-server/code-server/datacoves/profile/extensions/janisdd.vscode-edit-csv-0.10.0.vsix differ diff --git a/src/code-server/code-server/datacoves/profile/extensions/mechatroner.rainbow-csv-3.3.0.vsix b/src/code-server/code-server/datacoves/profile/extensions/mechatroner.rainbow-csv-3.3.0.vsix new file mode 100644 index 00000000..c8b7b6da Binary files /dev/null and b/src/code-server/code-server/datacoves/profile/extensions/mechatroner.rainbow-csv-3.3.0.vsix differ diff --git a/src/code-server/code-server/datacoves/profile/extensions/ms-python.python-2024.14.1.vsix b/src/code-server/code-server/datacoves/profile/extensions/ms-python.python-2024.14.1.vsix new file mode 100644 index 00000000..ee4ff4f0 Binary files /dev/null and b/src/code-server/code-server/datacoves/profile/extensions/ms-python.python-2024.14.1.vsix differ diff --git a/src/code-server/code-server/datacoves/profile/extensions/redhat.vscode-yaml-1.15.0.vsix b/src/code-server/code-server/datacoves/profile/extensions/redhat.vscode-yaml-1.15.0.vsix new file mode 100644 index 00000000..a585a29d Binary files /dev/null and b/src/code-server/code-server/datacoves/profile/extensions/redhat.vscode-yaml-1.15.0.vsix differ diff --git a/src/code-server/code-server/datacoves/profile/extensions/sleistner.vscode-fileutils-3.10.3.vsix b/src/code-server/code-server/datacoves/profile/extensions/sleistner.vscode-fileutils-3.10.3.vsix new file mode 100644 index 00000000..8a169f55 Binary files /dev/null and b/src/code-server/code-server/datacoves/profile/extensions/sleistner.vscode-fileutils-3.10.3.vsix differ diff --git a/src/code-server/code-server/datacoves/profile/extensions/streetsidesoftware.code-spell-checker-3.0.1.vsix b/src/code-server/code-server/datacoves/profile/extensions/streetsidesoftware.code-spell-checker-3.0.1.vsix new file mode 100644 index 00000000..52fafdf5 Binary files /dev/null and b/src/code-server/code-server/datacoves/profile/extensions/streetsidesoftware.code-spell-checker-3.0.1.vsix differ diff --git a/src/code-server/code-server/datacoves/user/README.md b/src/code-server/code-server/datacoves/user/README.md new file mode 100644 index 00000000..9fb6cf9f --- /dev/null +++ b/src/code-server/code-server/datacoves/user/README.md @@ -0,0 +1 @@ +User files/settings are mounted here. 
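download_extension.sh retries with an increasing delay (2, 5 and 10 seconds) before giving up. A rough Python equivalent of the same back-off pattern, for illustration only (the shipped helper relies on curl -JLO, which also honors Content-Disposition filenames):

import time
import urllib.request

def download_with_backoff(url: str, dest: str, delays=(0, 2, 5, 10)) -> None:
    last_error = None
    for delay in delays:
        time.sleep(delay)
        try:
            urllib.request.urlretrieve(url, dest)
            return
        except OSError as exc:
            last_error = exc
            print(f"Download failed ({exc}), retrying...")
    raise RuntimeError(f"Could not download {url}") from last_error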
diff --git a/src/code-server/code-server/profiles/common-dbt/extensions/charliermarsh.ruff-2024.56.0.vsix b/src/code-server/code-server/profiles/common-dbt/extensions/charliermarsh.ruff-2024.56.0.vsix new file mode 100644 index 00000000..be6bae0a Binary files /dev/null and b/src/code-server/code-server/profiles/common-dbt/extensions/charliermarsh.ruff-2024.56.0.vsix differ diff --git a/src/code-server/code-server/profiles/common-dbt/extensions/datacoves-copilot-0.1.3.vsix b/src/code-server/code-server/profiles/common-dbt/extensions/datacoves-copilot-0.1.3.vsix new file mode 100644 index 00000000..de032f25 Binary files /dev/null and b/src/code-server/code-server/profiles/common-dbt/extensions/datacoves-copilot-0.1.3.vsix differ diff --git a/src/code-server/code-server/profiles/common-dbt/extensions/dorzey.vscode-sqlfluff-3.2.0.vsix b/src/code-server/code-server/profiles/common-dbt/extensions/dorzey.vscode-sqlfluff-3.2.0.vsix new file mode 100644 index 00000000..674c0715 Binary files /dev/null and b/src/code-server/code-server/profiles/common-dbt/extensions/dorzey.vscode-sqlfluff-3.2.0.vsix differ diff --git a/src/code-server/code-server/profiles/common-dbt/extensions/samuelcolvin.jinjahtml-0.20.0.vsix b/src/code-server/code-server/profiles/common-dbt/extensions/samuelcolvin.jinjahtml-0.20.0.vsix new file mode 100644 index 00000000..6a982b7e Binary files /dev/null and b/src/code-server/code-server/profiles/common-dbt/extensions/samuelcolvin.jinjahtml-0.20.0.vsix differ diff --git a/src/code-server/code-server/profiles/common-dbt/extensions/vscode-datacoves-power-user-0.10.1.vsix b/src/code-server/code-server/profiles/common-dbt/extensions/vscode-datacoves-power-user-0.10.1.vsix new file mode 100644 index 00000000..30ca3824 Binary files /dev/null and b/src/code-server/code-server/profiles/common-dbt/extensions/vscode-datacoves-power-user-0.10.1.vsix differ diff --git a/src/code-server/code-server/profiles/dbt-bigquery/python/.gitkeep b/src/code-server/code-server/profiles/dbt-bigquery/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/code-server/code-server/profiles/dbt-databricks/python/.gitkeep b/src/code-server/code-server/profiles/dbt-databricks/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/code-server/code-server/profiles/dbt-redshift/extensions/mtxr.sqltools-0.28.3.vsix b/src/code-server/code-server/profiles/dbt-redshift/extensions/mtxr.sqltools-0.28.3.vsix new file mode 100644 index 00000000..4dc21bec Binary files /dev/null and b/src/code-server/code-server/profiles/dbt-redshift/extensions/mtxr.sqltools-0.28.3.vsix differ diff --git a/src/code-server/code-server/profiles/dbt-redshift/extensions/mtxr.sqltools-driver-pg-0.5.4.vsix b/src/code-server/code-server/profiles/dbt-redshift/extensions/mtxr.sqltools-driver-pg-0.5.4.vsix new file mode 100644 index 00000000..afd24089 Binary files /dev/null and b/src/code-server/code-server/profiles/dbt-redshift/extensions/mtxr.sqltools-driver-pg-0.5.4.vsix differ diff --git a/src/code-server/code-server/profiles/dbt-redshift/python/.gitkeep b/src/code-server/code-server/profiles/dbt-redshift/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/code-server/code-server/profiles/dbt-snowflake/extensions/snowflake.snowflake-vsc-1.10.5.vsix b/src/code-server/code-server/profiles/dbt-snowflake/extensions/snowflake.snowflake-vsc-1.10.5.vsix new file mode 100644 index 00000000..5e25f7e3 Binary files /dev/null and 
b/src/code-server/code-server/profiles/dbt-snowflake/extensions/snowflake.snowflake-vsc-1.10.5.vsix differ diff --git a/src/code-server/code-server/profiles/dbt-snowflake/python/.gitkeep b/src/code-server/code-server/profiles/dbt-snowflake/python/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/code-server/code-server/run b/src/code-server/code-server/run new file mode 100755 index 00000000..4fc762b6 --- /dev/null +++ b/src/code-server/code-server/run @@ -0,0 +1,72 @@ +#!/usr/bin/with-contenv bash + +if [ -n "${PASSWORD}" ] || [ -n "${HASHED_PASSWORD}" ]; then + AUTH="password" +else + AUTH="none" + echo "starting with no password" +fi + +if [ -z ${PROXY_DOMAIN+x} ]; then + PROXY_DOMAIN_ARG="" +else + PROXY_DOMAIN_ARG="--proxy-domain=${PROXY_DOMAIN}" +fi + +DATA=$( + printenv | grep DC_CUSTOM__ | sed "s/DC_CUSTOM__//g" | awk '!/^\s*#/' | awk '!/^\s*$/' | while IFS='' read -r line; do + key=$(echo "$line" | cut -d '=' -f 1) + value=$(echo "$line" | cut -d '=' -f 2-) + echo " $key=\"$value\" \\" + done +) + +if [ -z "${DATA}" ]; then + DATA="\\" +fi + +COMMAND_EXEC=" +exec \\ + env -i \\ + HOME=\"$HOME\" \\ + AUTH=\"$AUTH\" \\ + PROXY_DOMAIN_ARG=\"$PROXY_DOMAIN_ARG\" \\ + PUID=\"$PUID\" \\ + PGID=\"$PGID\" \\ + TZ=\"$TZ\" \\ + DBT_HOME=\"$DBT_HOME\" \\ + LOG_LEVEL=\"$LOG_LEVEL\" \\ + KUBERNETES_SERVICE_PORT_HTTPS=\"$KUBERNETES_SERVICE_PORT_HTTPS\" \\ + KUBERNETES_SERVICE_PORT=\"$KUBERNETES_SERVICE_PORT\" \\ + KUBERNETES_PORT_443_TCP=\"$KUBERNETES_PORT_443_TCP\" \\ + KUBERNETES_PORT_443_TCP_PROTO=\"$KUBERNETES_PORT_443_TCP_PROTO\" \\ + KUBERNETES_PORT_443_TCP_ADDR=\"$KUBERNETES_PORT_443_TCP_ADDR\" \\ + KUBERNETES_SERVICE_HOST=\"$KUBERNETES_SERVICE_HOST\" \\ + KUBERNETES_PORT=\"$KUBERNETES_PORT\" \\ + KUBERNETES_PORT_443_TCP_PORT=\"$KUBERNETES_PORT_443_TCP_PORT\" \\ + DATACOVES__DBT_HOME=\"$DATACOVES__DBT_HOME\" \\ + DATACOVES__USER_EMAIL=\"$DATACOVES__USER_EMAIL\" \\ + DATACOVES__USER_FULLNAME=\"$DATACOVES__USER_FULLNAME\" \\ + DATACOVES__REPOSITORY_URL=\"$DATACOVES__REPOSITORY_URL\" \\ + DATACOVES__REPOSITORY_CLONE=\"$DATACOVES__REPOSITORY_CLONE\" \\ + DATACOVES__USER_SLUG=\"$DATACOVES__USER_SLUG\" \\ + DATACOVES__SECRETS_URL=\"$DATACOVES__SECRETS_URL\" \\ + DATACOVES__SECRETS_TOKEN=\"$DATACOVES__SECRETS_TOKEN\" \\ + DATACOVES__SECRETS_PROJECT=\"$DATACOVES__SECRETS_PROJECT\" \\ + DATACOVES__AIRBYTE_HOST_NAME=\"$DATACOVES__AIRBYTE_HOST_NAME\" \\ + DATACOVES__AIRBYTE_PORT=\"$DATACOVES__AIRBYTE_PORT\" \\ + DATACOVES__AIRFLOW_DAGS_YML_PATH=\"$DATACOVES__AIRFLOW_DAGS_YML_PATH\" \\ + DATACOVES__AIRFLOW_DAGS_PATH=\"$DATACOVES__AIRFLOW_DAGS_PATH\" \\ + DATACOVES__ENVIRONMENT_SLUG=\"$DATACOVES__ENVIRONMENT_SLUG\" \\ + DATACOVES__API_ENDPOINT=\"$DATACOVES__API_ENDPOINT\" \\ + DATACOVES__API_TOKEN=\"$DATACOVES__API_TOKEN\" \\ + DATACOVES__PROJECT_SLUG=\"$DATACOVES__PROJECT_SLUG\" \\ + DATACOVES__AIRFLOW_DBT_PROFILE_PATH=\"$DATACOVES__AIRFLOW_DBT_PROFILE_PATH\" \\ + DATACOVES__REPO_PATH=\"$DATACOVES__REPO_PATH\" \\ + DATACOVES__DBT_ADAPTER=\"$DATACOVES__DBT_ADAPTER\" \\ + UV_CACHE_DIR=\"/tmp/uv\" \\ +${DATA} + bash -l -c \"s6-notifyoncheck -d -n 300 -w 1000 -c 'nc -z 127.0.0.1 8443' s6-setuidgid abc /app/code-server/bin/code-server --bind-addr 0.0.0.0:8443 --user-data-dir /config/data --extensions-dir /config/extensions --disable-telemetry --disable-update-check --auth ${AUTH} ${PROXY_DOMAIN_ARG} ${DEFAULT_WORKSPACE:-/config/workspace}\" +" + +eval "$COMMAND_EXEC" diff --git a/src/code-server/dbt-core-interface/Dockerfile b/src/code-server/dbt-core-interface/Dockerfile new file 
mode 100755 index 00000000..94433b00 --- /dev/null +++ b/src/code-server/dbt-core-interface/Dockerfile @@ -0,0 +1,85 @@ +FROM python:3.10 AS base +LABEL com.datacoves.from=python:3.10 +LABEL com.datacoves.version.dbt-core-interface=0.2.16 +LABEL com.datacoves.library.dbt-core-interface.psycopg2-binary=2.9.9 +LABEL com.datacoves.library.dbt-core-interface.snowflake-connector-python[pandas]=3.14.0 +LABEL com.datacoves.library.dbt-core-interface.snowflake-snowpark-python=1.25.0 +LABEL com.datacoves.library.dbt-core-interface.acryl-datahub=0.15.0.5 +LABEL com.datacoves.library.dbt-core-interface.dbt-core=1.9.0 +LABEL com.datacoves.library.dbt-core-interface.protobuf=5.29.3 +LABEL com.datacoves.library.dbt-core-interface.dbt-databricks=1.9.0 +LABEL com.datacoves.library.dbt-core-interface.dbt-snowflake=1.9.0 +LABEL com.datacoves.library.dbt-core-interface.dbt-redshift=1.9.0 +LABEL com.datacoves.library.dbt-core-interface.dbt-bigquery=1.9.0 +LABEL com.datacoves.library.dbt-core-interface.dbt-postgres=1.6.3 +LABEL com.datacoves.library.dbt-core-interface.dbt-coves=1.9.5 +LABEL com.datacoves.library.dbt-core-interface.git+https://gitlab.com/datacoves/permifrost.git=v0.15.6 +LABEL com.datacoves.library.dbt-core-interface.pre-commit=3.7.1 +LABEL com.datacoves.library.dbt-core-interface.PyYAML=6.0.2 +LABEL com.datacoves.library.dbt-core-interface.shandy-sqlfmt[jinjafmt]=0.26.0 +LABEL com.datacoves.library.dbt-core-interface.sqlfluff=3.1.1 +LABEL com.datacoves.library.dbt-core-interface.sqlfluff-templater-dbt=3.1.1 +LABEL com.datacoves.library.dbt-core-interface.rich=14.0.0 +LABEL com.datacoves.library.dbt-core-interface.kubernetes=31.0.0 +LABEL com.datacoves.library.dbt-core-interface.uv=0.4.30 +LABEL com.datacoves.library.dbt-core-interface.ruff=0.8.3 +LABEL com.datacoves.library.dbt-core-interface.snowflake-cli=3.7.1 +LABEL com.datacoves.library.dbt-core-interface.certifi=2025.1.31 + +RUN apt-get update \ + && apt-get install -y \ + vim \ + --no-install-recommends + +ENV PYTHONDONTWRITEBYTECODE=1 +ENV PYTHONUNBUFFERED=1 + +RUN pip install uv && \ + uv pip install --system git+https://github.com/datacoves/dbt-core-interface.git@v0.2.16 + +# Set user and group +ARG uid=1000 +ARG user=abc + +ARG gid=1000 +ARG group=abc + +RUN groupadd -g ${gid} ${group} +RUN useradd -u ${uid} -g ${group} -s /bin/bash -m ${user} + +WORKDIR /usr/src +COPY src/ /usr/src + +EXPOSE 8581 + +ENTRYPOINT ["/usr/src/bin/entrypoint.sh"] + +FROM base as dbt-snowflake + +COPY profiles/dbt-snowflake ./ +# FIXME: We should not install and then uninstall a library :( +RUN uv pip install --system -r dbt-snowflake.txt && pip uninstall -y sqlfluff-templater-dbt \ + && uv pip uninstall --system oscrypto && uv pip install --system oscrypto@git+https://github.com/wbond/oscrypto.git@d5f3437ed24257895ae1edd9e503cfb352e635a8 \ + && /usr/src/bin/set_adapters_app.sh all /usr/local/lib + +FROM base as dbt-redshift + +COPY profiles/dbt-redshift ./ +# FIXME: We should not install and then uninstall a library :( +RUN uv pip install --system -r dbt-redshift.txt && uv pip uninstall --system sqlfluff-templater-dbt \ + && /usr/src/bin/set_adapters_app.sh all /usr/local/lib + +FROM base as dbt-bigquery + +COPY profiles/dbt-bigquery ./ +# FIXME: We should not install and then uninstall a library :( +RUN uv pip install --system -r dbt-bigquery.txt && uv pip uninstall --system sqlfluff-templater-dbt \ + && /usr/src/bin/set_adapters_app.sh bigquery /usr/local/lib --skip-validation + +FROM base as dbt-databricks + +COPY profiles/dbt-databricks ./ +# FIXME: 
We should not install and then uninstall a library :( +RUN uv pip install --system -r dbt-databricks.txt && uv pip uninstall --system sqlfluff-templater-dbt \ + && /usr/src/bin/set_adapters_app.sh databricks /usr/local/lib --skip-validation \ + && /usr/src/bin/set_adapters_app.sh spark /usr/local/lib --skip-validation diff --git a/src/code-server/dbt-core-interface/profiles/dbt-bigquery/.gitkeep b/src/code-server/dbt-core-interface/profiles/dbt-bigquery/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/code-server/dbt-core-interface/profiles/dbt-databricks/.gitkeep b/src/code-server/dbt-core-interface/profiles/dbt-databricks/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/code-server/dbt-core-interface/profiles/dbt-redshift/.gitkeep b/src/code-server/dbt-core-interface/profiles/dbt-redshift/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/code-server/dbt-core-interface/profiles/dbt-snowflake/.gitkeep b/src/code-server/dbt-core-interface/profiles/dbt-snowflake/.gitkeep new file mode 100644 index 00000000..e69de29b diff --git a/src/code-server/dbt-core-interface/src/bin/entrypoint.sh b/src/code-server/dbt-core-interface/src/bin/entrypoint.sh new file mode 100755 index 00000000..4ef65b1f --- /dev/null +++ b/src/code-server/dbt-core-interface/src/bin/entrypoint.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +export PROJECT_DIR=$CODE_HOME/$DBT_HOME +export PROFILES_DIR=$CODE_HOME/.dbt + +# Replacing default app name in python libraries. +# /usr/src/bin/set_adapters_app.sh all /usr/local/lib +# /usr/src/bin/set_adapters_app.sh bigquery /usr/local/lib --skip-validation +# /usr/src/bin/set_adapters_app.sh databricks /usr/local/lib --skip-validation +# /usr/src/bin/set_adapters_app.sh spark /usr/local/lib --skip-validation + +while ! [ -d $PROJECT_DIR ]; do sleep 5; done + +cd $PROJECT_DIR + +while true; do + dbt_interface_pid=$(pgrep -u abc -f "python -m dbt_core_interface.project") + if [ -n "$dbt_interface_pid" ]; then + kill -9 $dbt_interface_pid + fi + su abc -c "python -m dbt_core_interface.project --host 0.0.0.0 --port 8581" + echo "Reload dbt-core-interface server..." + sleep 1 +done diff --git a/src/common/plugins/notifiers/__init__.py b/src/common/plugins/notifiers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/common/plugins/notifiers/datacoves/__init__.py b/src/common/plugins/notifiers/datacoves/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/common/plugins/notifiers/datacoves/ms_teams.py b/src/common/plugins/notifiers/datacoves/ms_teams.py new file mode 100644 index 00000000..6b7abcb6 --- /dev/null +++ b/src/common/plugins/notifiers/datacoves/ms_teams.py @@ -0,0 +1,182 @@ +import os +import urllib +from functools import cached_property + +from airflow.exceptions import AirflowException +from airflow.notifications.basenotifier import BaseNotifier +from airflow.providers.http.hooks.http import HttpHook + +AIRFLOW_URL = os.environ.get("AIRFLOW__WEBSERVER__BASE_URL") + + +class MSTeamsHook(HttpHook): + """ + This hook allows you to post messages to MS Teams using the Incoming Webhook connector. + + Takes both MS Teams webhook token directly and connection that has MS Teams webhook token. + If both supplied, the webhook token will be appended to the host in the connection. 
+ + :param http_conn_id: connection that has MS Teams webhook URL + :type http_conn_id: str + :param webhook_token: MS Teams webhook token + :type webhook_token: str + :param message: The message you want to send on MS Teams + :type message: str + :param subtitle: The subtitle of the message to send + :type subtitle: str + :param button_text: The text of the action button + :type button_text: str + :param button_url: The URL for the action button click + :type button_url : str + :param theme_color: Hex code of the card theme, without the # + :type message: str + :param proxy: Proxy to use when making the webhook request + :type proxy: str + + """ + + default_conn_name = "ms_teams" + conn_type = "http" + hook_name = "MS Teams" + + def __init__( + self, + http_conn_id=default_conn_name, + webhook_token=None, + message="", + subtitle="", + button_text="", + button_url="", + theme_color="00FF00", + proxy=None, + *args, + **kwargs, + ): + super(MSTeamsHook, self).__init__(*args, **kwargs) + self.http_conn_id = http_conn_id + self.webhook_token = self.get_token(webhook_token, http_conn_id) + self.message = message + self.subtitle = subtitle + self.button_text = button_text + self.button_url = button_url + self.theme_color = theme_color + self.proxy = proxy + + def get_proxy(self, http_conn_id): + conn = self.get_connection(http_conn_id) + extra = conn.extra_dejson + return extra.get("proxy", "") + + def get_token(self, token, http_conn_id): + """ + Given either a manually set token or a conn_id, return the webhook_token to use + :param token: The manually provided token + :param conn_id: The conn_id provided + :return: webhook_token (str) to use + """ + if token: + return token + elif http_conn_id: + conn = self.get_connection(http_conn_id) + extra = conn.extra_dejson + return extra.get("webhook_token", "") + else: + raise AirflowException( + "Cannot get URL: No valid MS Teams " "webhook URL nor conn_id supplied" + ) + + def build_message(self): + cardjson = """ + {{ + "@type": "MessageCard", + "@context": "http://schema.org/extensions", + "themeColor": "{3}", + "summary": "{0}", + "sections": [{{ + "activityTitle": "{1}", + "activitySubtitle": "{2}", + "markdown": true, + "potentialAction": [ + {{ + "@type": "OpenUri", + "name": "{4}", + "targets": [ + {{ "os": "default", "uri": "{5}" }} + ] + }} + ] + }}] + }} + """ + return cardjson.format( + self.message, + self.message, + self.subtitle, + self.theme_color, + self.button_text, + self.button_url, + ) + + def execute(self): + """ + Remote Popen (actually execute the webhook call) + + :param cmd: command to remotely execute + :param kwargs: extra arguments to Popen (see subprocess.Popen) + """ + proxies = {} + proxy_url = self.get_proxy(self.http_conn_id) + if len(proxy_url) > 5: + proxies = {"https": proxy_url} + self.run( + endpoint=self.webhook_token, + data=self.build_message(), + headers={"Content-type": "application/json"}, + extra_options={"proxies": proxies}, + ) + + +class MSTeamsNotifier(BaseNotifier): + template_fields = ("message",) + + def __init__( + self, + message: str = None, + connection_id: str = MSTeamsHook.default_conn_name, + title: str = "Airflow DAG Status", + subtitle: str = "", + button_text: str = "Logs", + button_url: str = "", + theme_color: str = "AAAAAA", + proxy: str = None, + ): + super().__init__() + self.http_conn_id = connection_id + self.message = message + self.title = title + self.subtitle = subtitle + self.button_text = button_text + self.button_url = button_url + self.theme_color = theme_color + 
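+ # Note: button_url set here is only an initial value; notify() overrides it
+ # with a log URL built from AIRFLOW__WEBSERVER__BASE_URL, the dag_id, the
+ # task_id and the URL-encoded execution timestamp.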
self.proxy = proxy + + @cached_property + def hook(self) -> MSTeamsHook: + return MSTeamsHook( + http_conn_id=self.http_conn_id, + ) + + def notify(self, context): + dag_id = context["dag_run"].dag_id + task_id = context["task_instance"].task_id + context["task_instance"].xcom_push(key=dag_id, value=True) + timestamp = context["ts"] + urlencoded_timestamp = urllib.parse.quote(timestamp) + logs_url = "{}/log?dag_id={}&task_id={}&execution_date={}".format( + AIRFLOW_URL, dag_id, task_id, urlencoded_timestamp + ) + self.hook.message = self.message + self.hook.button_text = self.button_text + self.hook.theme_color = self.theme_color + self.hook.button_url = logs_url + self.hook.execute() diff --git a/src/common/plugins/operators/__init__.py b/src/common/plugins/operators/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/common/plugins/operators/datacoves/__init__.py b/src/common/plugins/operators/datacoves/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/common/plugins/operators/datacoves/bash.py b/src/common/plugins/operators/datacoves/bash.py new file mode 100644 index 00000000..a5f2c0b4 --- /dev/null +++ b/src/common/plugins/operators/datacoves/bash.py @@ -0,0 +1,167 @@ +import os +import subprocess +import sys +from pathlib import Path +from typing import Any, Dict, Sequence + +import requests + +from airflow.exceptions import AirflowException +from airflow.operators.bash_operator import BashOperator +from airflow.utils.context import Context + +DATACOVES_DEFAULT_VENV = "/opt/datacoves/virtualenvs/main" + + +class DatacovesBashException(Exception): + pass + + +class DatacovesBashOperator(BashOperator): + """ + Base DatacovesBash class + Copy the entire Datacoves repo to a temporary directory, then run the command within that directory. + If no virtual environment is specified, Datacoves default virtual environment is used. + If the virtual environment does not exist, the command is run without a virtual environment. + """ + + template_fields: Sequence[str] = ( + "bash_command", + "env", + "cwd", + "virtualenv", + "activate_venv", + ) + template_fields_renderers = {"bash_command": "bash", "env": "json"} + template_ext: Sequence[str] = (".sh", ".bash") + + @classmethod + def _get_full_command(cls, command, virtualenv=None): + if cls.__name__ == "_DatacovesDbtDecoratedOperator": + if not Path(os.environ["DATACOVES__DBT_HOME"], "dbt_packages").exists(): + command = f"dbt deps && {command}" + if virtualenv and Path(virtualenv).exists(): + full_command = f"source {virtualenv}/bin/activate && {command}" + else: + activate_cmd = ( + f"source {DATACOVES_DEFAULT_VENV}/bin/activate && " + if Path(DATACOVES_DEFAULT_VENV).exists() + else "" + ) + full_command = f"{activate_cmd}{command}" + return full_command + + def __init__( + self, + bash_command: str = "", + virtualenv: str = None, + activate_venv: bool = True, + cwd: str = None, + upload_manifest: bool = False, + *args, + **kwargs, + ): + """ + :param virtualenv: The path to the virtual environment. + :type virtualenv: str + :param activate_venv: Whether to activate the virtualenv + :type activate_venv: bool + :param bash_command: The bash command to run. + :type bash_command: str + :param cwd: The current working directory to run the command, relative to repo root, i.e: 'transform'. 
+ :type cwd: str + """ + self.virtualenv = virtualenv + self.cwd = cwd + self.activate_venv = activate_venv + self.upload_manifest = upload_manifest + self.bash_command = bash_command + if activate_venv: + full_command = self._get_full_command(bash_command, virtualenv) + else: + full_command = bash_command + + # If there are null (None) environment variables, break + if "env" in kwargs: + self.check_env(kwargs["env"]) + + super().__init__(bash_command=full_command, cwd=cwd, *args, **kwargs) + + def execute(self, context: Context, perform_copy=True): + if self.env: + self.check_env(self.env) + if perform_copy: + readonly_repo = os.environ["DATACOVES__REPO_PATH_RO"] + destination = os.environ["DATACOVES__REPO_PATH"] + subprocess.run(["mkdir", "-p", destination], check=False) + subprocess.run( + ["cp", "-rfT", f"{readonly_repo}/", destination], check=False + ) + # Add destination to the Python path + os.environ["PYTHONPATH"] = f"{destination}:{os.environ.get('PYTHONPATH')}" + sys.path.append(destination) + if self.cwd: + self.cwd = f"{destination}/{self.cwd}" + else: + self.cwd = destination + result = super().execute(context) + + if self.upload_manifest and self._get_env("DATACOVES__UPLOAD_MANIFEST"): + self._upload_manifest() + return result + + def _upload_manifest(self): + manifest_path = Path(self.cwd, "target/manifest.json") + + if os.path.isfile(manifest_path): + url = self._get_env("DATACOVES__UPLOAD_MANIFEST_URL") + bearer_token = self._get_env("DATACOVES__UPLOAD_MANIFEST_TOKEN") + env_slug = self._get_env("DATACOVES__ENVIRONMENT_SLUG") + run_id = self._get_env("AIRFLOW_CTX_DAG_RUN_ID") + dag_id = self._get_env("AIRFLOW_CTX_DAG_ID") + print(f"Uploading manifest for DAGRun {run_id} in Environment {env_slug}") + with open(manifest_path, "r") as file: + contents = file.read() + headers = {"Authorization": f"Bearer {bearer_token}"} + payload = { + "environment_slug": env_slug, + "run_id": run_id, + "dag_id": dag_id, + } + files = {"file": contents} + res = requests.post( + url, headers=headers, data=payload, files=files, timeout=600.0 + ) + + if res.ok: + print("Manifest upload successful") + else: + raise AirflowException( + f"Manifest upload failed with code {res.status_code}: {res.content}" + ) + + else: + print("Upload manifest enabled but no manifest.json found. 
Skipping.") + + def _get_env(self, key): + return os.environ.get(key) + + def check_env(self, environment): + # If there are null (None) environment variables, break + null_vars = self._filter_env(environment, None) + if null_vars: + raise DatacovesBashException( + f"Environment variables not set: {', '.join(null_vars)}" + ) + # If there are empty ("") environment variables, warn the user in Logs + empty_vars = self._filter_env(environment, "") + if empty_vars: + print(f"Environment variables not set: {', '.join(empty_vars)}") + + def _filter_env(self, environment: Dict[str, Any], filter_value): + vars = [] + if filter_value is None: + vars = [key for key, value in environment.items() if value is None] + else: + vars = [key for key, value in environment.items() if value == filter_value] + return vars diff --git a/src/common/plugins/operators/datacoves/data_sync.py b/src/common/plugins/operators/datacoves/data_sync.py new file mode 100644 index 00000000..913af497 --- /dev/null +++ b/src/common/plugins/operators/datacoves/data_sync.py @@ -0,0 +1,200 @@ +import json +import os + +from operators.datacoves.bash import DatacovesBashOperator + +from airflow.hooks.base import BaseHook + +DEFAULT_AIRFLOW_TABLES = [ + "ab_permission", + "ab_role", + "ab_user", + "dag", + "dag_run", + "dag_tag", + "import_error", + "job", + "task_fail", + "task_instance", +] + + +class DatacovesDataSyncOperator(DatacovesBashOperator): + """ + Extract data from a source and load into a destination by calling `dbt-coves data-sync` + """ + + template_fields = ("service_connection_name", "tables") + + def __init__( + self, + destination_type: str, + tables: list = DEFAULT_AIRFLOW_TABLES, + additional_tables: list = [], + destination_schema: str = "", + connection_id: str = "", + service_connection_name: str = "", + *args, + **kwargs, + ) -> None: + """ + destination_type: indicates destination: i.e. snowflake. + service_connection_name: defined in the Datacoves environment. + Destination of the data sync. 
+ """ + if not destination_type: + raise ValueError( + "DatacovesDataSyncOperator is not meant to be used directly", + "Use DatacovesDataSyncOperatorSnowflake or DatacovesDataSyncOperatorRedshift variants instead", + ) + if service_connection_name and connection_id: + raise ValueError( + "Only one of 'service_connection_name' or 'connection_id' should be provided" + ) + if connection_id: + self.airflow_connection_name = connection_id + else: + self.airflow_connection_name = None + self.service_connection_name = service_connection_name or "load_airflow" + self.destination_type = destination_type + if additional_tables: + tables += additional_tables + self.tables = list(set(tables)) + # Construct environment variables needed by `dbt-coves data-sync` + source_database = destination_schema or self._get_source_database() + command = f"dbt-coves data-sync {destination_type} --source {source_database}" + tables_quoted = f'"{",".join(self.tables)}"' + command += f" --tables {tables_quoted}" + task_id = ( + f"data_sync_airflow_to_{connection_id}" + if connection_id + else f"data_sync_airflow_to_{self.service_connection_name}" + ) + super().__init__( + task_id=task_id, + bash_command=command, + ) + + def execute(self, context): + self.env = self._get_env_for_data_sync() + self.append_env = True + return super().execute(context) + + def _get_env_for_data_sync(self) -> dict: + raise NotImplementedError + + def _get_airflow_db_conn_string(self) -> str: + return os.environ.get("AIRFLOW__DATABASE__SQL_ALCHEMY_CONN") + + def _get_source_database(self) -> str: + full_url = os.environ.get("AIRFLOW__WEBSERVER__BASE_URL") + # https://airflow-. => split to get 'airflow-' + value = full_url.split("://")[1].split(".")[0] + return value + + +class DatacovesDataSyncOperatorSnowflake(DatacovesDataSyncOperator): + def __init__( + self, + tables: list = DEFAULT_AIRFLOW_TABLES, + additional_tables: list = [], + destination_schema="", + *args, + **kwargs, + ) -> None: + self.fields = [ + "PASSWORD", + "SCHEMA", + ] + self.extra_fields = ["ACCOUNT", "WAREHOUSE", "DATABASE", "ROLE"] + super().__init__( + destination_type="snowflake", + tables=tables, + additional_tables=additional_tables, + destination_schema=destination_schema, + *args, + **kwargs, + ) + + def _load_env_vars_from_airflow_connection( + self, env: dict, datasync_prefix: str + ) -> dict: + """Load environment variables from Airflow Connection""" + conn = BaseHook.get_connection(self.airflow_connection_name) + conn_extra_fields = json.loads(conn.extra) + for key in self.fields: + env[f"{datasync_prefix}{key}"] = getattr(conn, key.lower()) + for key in self.extra_fields: + env[f"{datasync_prefix}{key}"] = conn_extra_fields.get(key.lower(), "") + env[f"{datasync_prefix}USER"] = getattr(conn, "login") + return env + + def _get_env_for_data_sync(self) -> dict: + """Define env variables for dbt-coves data-sync""" + datasync_prefix = "DATA_SYNC_SNOWFLAKE_" + env = { + "DATA_SYNC_SOURCE_CONNECTION_STRING": self._get_airflow_db_conn_string(), + f"{datasync_prefix}TYPE": "SNOWFLAKE", + } + # we can either retrieve them from Airflow Connection or environment variables + if self.airflow_connection_name: + env = self._load_env_vars_from_airflow_connection(env, datasync_prefix) + else: + datacoves_prefix = f"DATACOVES__{self.service_connection_name.upper()}__" + for key in [ + "ACCOUNT", + "DATABASE", + "PASSWORD", + "ROLE", + "SCHEMA", + "USER", + "WAREHOUSE", + ]: + env[f"{datasync_prefix}{key}"] = os.environ.get( + f"{datacoves_prefix}{key}", "" + ) + return env + + 
+class DatacovesDataSyncOperatorRedshift(DatacovesDataSyncOperator): + def __init__( + self, + tables: list = DEFAULT_AIRFLOW_TABLES, + additional_tables: list = [], + destination_schema="", + *args, + **kwargs, + ) -> None: + self.fields = ["HOST", "PASSWORD"] + super().__init__( + destination_type="redshift", + tables=tables, + additional_tables=additional_tables, + destination_schema=destination_schema, + *args, + **kwargs, + ) + + def _get_env_for_data_sync(self) -> dict: + """Define env variables for dbt-coves data-sync""" + env = {"DATA_SYNC_SOURCE_CONNECTION_STRING": self._get_airflow_db_conn_string()} + datacoves_prefix = f"DATACOVES__{self.service_connection_name.upper()}__" + datasync_prefix = "DATA_SYNC_REDSHIFT_" + if self.airflow_connection_name: + conn = BaseHook.get_connection(self.airflow_connection_name) + for key in self.fields: + env[f"{datasync_prefix}{key}"] = getattr(conn, key.lower()) + env[f"{datasync_prefix}DATABASE"] = getattr(conn, "schema") + env[f"{datasync_prefix}USER"] = getattr(conn, "login") + else: + for key in [ + "DATABASE", + "PASSWORD", + "SCHEMA", + "USER", + "HOST", + ]: + env[f"{datasync_prefix}{key}"] = os.environ.get( + f"{datacoves_prefix}{key}", "" + ) + return env diff --git a/src/common/plugins/operators/datacoves/dbt.py b/src/common/plugins/operators/datacoves/dbt.py new file mode 100644 index 00000000..29b618ca --- /dev/null +++ b/src/common/plugins/operators/datacoves/dbt.py @@ -0,0 +1,67 @@ +import os +from pathlib import Path +from typing import Sequence + +from operators.datacoves.bash import DatacovesBashOperator + + +class DatacovesDbtOperator(DatacovesBashOperator): + """ + Copy the entire Datacoves repo to a temporary directory + Look for dbt project inside that temp and use as 'cwd' + Run 'dbt deps' and 'command' in 'cwd' (activate virtualenv passed) + """ + + template_fields = ( + "bash_command", + "env", + "virtualenv", + "project_dir", + ) + template_fields_renderers = {"bash_command": "bash", "env": "json"} + template_ext: Sequence[str] = (".sh", ".bash") + + def __init__( + self, + project_dir: str = None, + virtualenv: str = None, + bash_command: str = "", + run_dbt_deps: bool = False, + upload_manifest=True, + *args, + **kwargs, + ): + """ + :param project_dir: optional relative path of the project to use + (it'll be discovered using Datacoves Repo and Project environment variables) + :type project_dir: str + :param virtualenv: optional path to a virtual environment. + (Datacoves default Airflow virtualenv will be used instead) + :type virtualenv: str + :param bash_command: The bash command to run. 
+ :type bash_command: str + :param run_dbt_deps: Whether to force dbt deps running + :type run_dbt_deps: bool + """ + self.bash_command = bash_command + if project_dir: + self.project_dir = Path(project_dir) + else: + # relpath from repo root to dbt project + self.project_dir = Path(os.environ["DATACOVES__DBT_HOME"]).relative_to( + os.environ["DATACOVES__REPO_PATH_RO"] + ) + if ( + run_dbt_deps + or not Path(os.environ["DATACOVES__DBT_HOME"], "dbt_packages").exists() + ): + bash_command = f"dbt deps && {bash_command}" + + super().__init__( + bash_command=bash_command, + virtualenv=virtualenv, + cwd=self.project_dir, + upload_manifest=upload_manifest, + *args, + **kwargs, + ) diff --git a/src/common/providers/datacoves/datacoves_airflow_provider/__init__.py b/src/common/providers/datacoves/datacoves_airflow_provider/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/common/providers/datacoves/datacoves_airflow_provider/decorators/__init__.py b/src/common/providers/datacoves/datacoves_airflow_provider/decorators/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/common/providers/datacoves/datacoves_airflow_provider/decorators/bash.py b/src/common/providers/datacoves/datacoves_airflow_provider/decorators/bash.py new file mode 100644 index 00000000..9b0fee70 --- /dev/null +++ b/src/common/providers/datacoves/datacoves_airflow_provider/decorators/bash.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +import warnings +from typing import Any, Callable, Collection, Mapping, Sequence + +from operators.datacoves.bash import DatacovesBashOperator + +from airflow.decorators.base import ( + DecoratedOperator, + TaskDecorator, + task_decorator_factory, +) +from airflow.utils.context import Context, context_merge +from airflow.utils.operator_helpers import determine_kwargs +from airflow.utils.types import NOTSET + + +class _DatacovesBashDecoratedOperator(DecoratedOperator, DatacovesBashOperator): + """ + Wraps a Python callable and uses the callable return value as the Bash command to be executed. + + :param python_callable: A reference to an object that is callable. + :param op_kwargs: A dictionary of keyword arguments that will get unpacked + in your function (templated). + :param op_args: A list of positional arguments that will get unpacked when + calling your callable (templated). + """ + + template_fields: Sequence[str] = ( + *DecoratedOperator.template_fields, + *DatacovesBashOperator.template_fields, + ) + template_fields_renderers: dict[str, str] = { + **DecoratedOperator.template_fields_renderers, + **DatacovesBashOperator.template_fields_renderers, + } + + custom_operator_name: str = "@task.datacoves_bash" + + def __init__( + self, + *, + python_callable: Callable, + op_args: Collection[Any] | None = None, + op_kwargs: Mapping[str, Any] | None = None, + **kwargs, + ) -> None: + if kwargs.pop("multiple_outputs", None): + warnings.warn( + f"`multiple_outputs=True` is not supported in {self.custom_operator_name} tasks. 
Ignoring.", + UserWarning, + stacklevel=3, + ) + + super().__init__( + python_callable=python_callable, + op_args=op_args, + op_kwargs=op_kwargs, + bash_command=NOTSET, + multiple_outputs=False, + **kwargs, + ) + + def execute(self, context: Context) -> Any: + context_merge(context, self.op_kwargs) + kwargs = determine_kwargs(self.python_callable, self.op_args, context) + bash_command = self.python_callable(*self.op_args, **kwargs) + if not isinstance(bash_command, str) or bash_command.strip() == "": + raise TypeError( + "The returned value from the TaskFlow callable must be a non-empty string." + ) + self.bash_command = self._get_full_command( + command=bash_command, virtualenv=None + ) + + return super().execute(context) + + +def datacoves_bash_task( + python_callable: Callable | None = None, + **kwargs, +) -> TaskDecorator: + """ + Wrap a function into a BashOperator. + + Accepts kwargs for operator kwargs. Can be reused in a single DAG. This function is only used only used + during type checking or auto-completion. + + :param python_callable: Function to decorate. + + :meta private: + """ + return task_decorator_factory( + python_callable=python_callable, + decorated_operator_class=_DatacovesBashDecoratedOperator, + **kwargs, + ) diff --git a/src/common/providers/datacoves/datacoves_airflow_provider/decorators/data_sync.py b/src/common/providers/datacoves/datacoves_airflow_provider/decorators/data_sync.py new file mode 100644 index 00000000..72a578d3 --- /dev/null +++ b/src/common/providers/datacoves/datacoves_airflow_provider/decorators/data_sync.py @@ -0,0 +1,101 @@ +from __future__ import annotations + +from typing import Callable, Sequence + +from operators.datacoves.data_sync import ( + DatacovesDataSyncOperatorRedshift, + DatacovesDataSyncOperatorSnowflake, +) + +from airflow.decorators.base import ( + DecoratedOperator, + TaskDecorator, + task_decorator_factory, +) + + +class _DatacovesDataSyncSnowflakeDecoratedOperator( + DecoratedOperator, DatacovesDataSyncOperatorSnowflake +): + """ + Wraps a Python callable and uses the callable return value as the Bash command + + :param python_callable: A reference to an object that is callable. + :param op_kwargs: A dictionary of keyword arguments that will get unpacked + in your function (templated). + :param op_args: A list of positional arguments that will get unpacked when + calling your callable (templated). + """ + + template_fields: Sequence[str] = ( + *DecoratedOperator.template_fields, + *DatacovesDataSyncOperatorSnowflake.template_fields, + ) + template_fields_renderers: dict[str, str] = { + **DecoratedOperator.template_fields_renderers, + **DatacovesDataSyncOperatorSnowflake.template_fields_renderers, + } + + custom_operator_name: str = "@task.datacoves_data_sync_snowflake" + service_connection_name = "" + + +class _DatacovesDataSyncRedshiftDecoratedOperator( + DecoratedOperator, DatacovesDataSyncOperatorRedshift +): + """ + Wraps a Python callable and uses the callable return value as the Bash command + + :param python_callable: A reference to an object that is callable. + :param op_kwargs: A dictionary of keyword arguments that will get unpacked + in your function (templated). + :param op_args: A list of positional arguments that will get unpacked when + calling your callable (templated). 
+ """ + + template_fields: Sequence[str] = ( + *DecoratedOperator.template_fields, + *DatacovesDataSyncOperatorRedshift.template_fields, + ) + template_fields_renderers: dict[str, str] = { + **DecoratedOperator.template_fields_renderers, + **DatacovesDataSyncOperatorRedshift.template_fields_renderers, + } + + custom_operator_name: str = "@task.datacoves_data_sync_redshift" + service_connection_name = "" + + +def datacoves_data_sync_task( + db_type: str, + destination_schema: str = "", + additional_tables: list[str] = [], + python_callable: Callable | None = None, + **kwargs, +) -> TaskDecorator: + """ + Wrap a function into a BashOperator. + + Accepts kwargs for operator kwargs. Can be reused in a single DAG. This function is only used only used + during type checking or auto-completion. + + :param python_callable: Function to decorate. + + :meta private: + """ + if db_type.lower() not in ["snowflake", "redshift"]: + raise ValueError( + "db_type is required and must be either 'snowflake' or 'redshift'" + ) + else: + return task_decorator_factory( + destination_schema=destination_schema, + additional_tables=additional_tables, + python_callable=python_callable, + decorated_operator_class=( + _DatacovesDataSyncSnowflakeDecoratedOperator + if db_type.lower() == "snowflake" + else _DatacovesDataSyncRedshiftDecoratedOperator + ), + **kwargs, + ) diff --git a/src/common/providers/datacoves/datacoves_airflow_provider/decorators/dbt.py b/src/common/providers/datacoves/datacoves_airflow_provider/decorators/dbt.py new file mode 100644 index 00000000..ece79d3f --- /dev/null +++ b/src/common/providers/datacoves/datacoves_airflow_provider/decorators/dbt.py @@ -0,0 +1,405 @@ +from __future__ import annotations + +import json +import os +import subprocess +import sys +import warnings +from pathlib import Path +from typing import Any, Callable, Collection, Mapping, Sequence + +import yaml +from datacoves_airflow_provider.utils.dbt_api import DatacovesDbtAPI +from operators.datacoves.dbt import DatacovesDbtOperator + +from airflow.decorators.base import ( + DecoratedOperator, + TaskDecorator, + task_decorator_factory, +) +from airflow.hooks.base import BaseHook +from airflow.models.connection import Connection +from airflow.utils.context import Context, context_merge +from airflow.utils.operator_helpers import determine_kwargs +from airflow.utils.types import NOTSET + + +class GenerateDbtProfiles: + """Class to generate DBT's profiles.yml""" + + @classmethod + def generate( + cls, airflow_connection_name: str, target: str = "default", overrides: dict = {} + ) -> str: + conn = BaseHook.get_connection(airflow_connection_name) + + if not conn: + raise RuntimeError( + f"Airflow connection ID {airflow_connection_name} not found" + ) + + if not hasattr(cls, f"generate_{conn.conn_type}"): + raise RuntimeError(f"Connection type {conn.conn_type} not yet supported.") + + new_connection = getattr(cls, f"generate_{conn.conn_type}")(conn) + new_connection.update(overrides) + + profile = { + os.environ.get("DATACOVES__DBT_PROFILE", "default"): { + "outputs": { + target: new_connection, + }, + "target": target, + } + } + + return str(yaml.dump(profile, indent=4)) + + @classmethod + def generate_snowflake(cls, conn: Connection) -> dict: + extra = conn.extra_dejson + + ret = { + "type": "snowflake", + "account": extra.get("account", ""), + "warehouse": extra.get("warehouse", ""), + "database": extra.get("database", ""), + "role": extra.get("role", ""), + "schema": conn.schema, + "user": conn.login, + "threads": 16, + } + + 
if conn.host: + ret["host"] = conn.host + + if conn.port: + ret["port"] = conn.port + + if "private_key_file" in extra: + ret["private_key_path"] = extra["private_key_file"] + elif "private_key_content" in extra: + ret["private_key"] = extra["private_key_content"] + else: + ret["password"] = conn.password + + if "mfa_protected" in extra and extra["mfa_protected"]: + ret["authenticator"] = "username_password_mfa" + + return ret + + @classmethod + def generate_redshift(cls, conn: Connection) -> dict: + extra = conn.extra_dejson + + return { + "type": "redshift", + "host": conn.host, + "user": conn.login, + "password": conn.password, + "port": conn.port if conn.port else 5439, + "dbname": conn.schema, + "schema": extra.get("schema", ""), + "threads": 8, + "keepalives_idle": 240, + "connect_timeout": 900, + } + + @classmethod + def generate_databricks(cls, conn: Connection) -> dict: + extra = conn.extra_dejson + + return { + "type": "databricks", + "schema": conn.schema, + "host": conn.host, + "http_path": extra.get("http_path", ""), + "token": conn.password if conn.password else extra.get("token", ""), + "threads": 8, + } + + @classmethod + def generate_gcpbigquery(cls, conn: Connection) -> dict: + extra = conn.extra_dejson + + return { + "type": "bigquery", + "method": "service-account-json", + "project": extra.get("project", ""), + "dataset": extra.get("dataset", ""), + "threads": 8, + "keyfile_json": json.loads(extra.get("keyfile_dict", "{}")), + } + + +class _DatacovesDbtDecoratedOperator(DecoratedOperator, DatacovesDbtOperator): + """ + Wraps a Python callable and uses the callable return value as the Bash command to be executed. + + :param python_callable: A reference to an object that is callable. + :param op_kwargs: A dictionary of keyword arguments that will get unpacked + in your function (templated). + :param op_args: A list of positional arguments that will get unpacked when + calling your callable (templated). + """ + + template_fields: Sequence[str] = ( + *DecoratedOperator.template_fields, + *DatacovesDbtOperator.template_fields, + ) + template_fields_renderers: dict[str, str] = { + **DecoratedOperator.template_fields_renderers, + **DatacovesDbtOperator.template_fields_renderers, + } + + custom_operator_name: str = "@task.datacoves_dbt" + + def __init__( + self, + *, + python_callable: Callable, + op_args: Collection[Any] | None = None, + op_kwargs: Mapping[str, Any] | None = None, + **kwargs, + ) -> None: + if kwargs.pop("multiple_outputs", None): + warnings.warn( + f"`multiple_outputs=True` is not supported in {self.custom_operator_name} tasks. 
Ignoring.", + UserWarning, + stacklevel=3, + ) + + self.airflow_connection_name = kwargs.pop("connection_id", None) + self.target = kwargs.pop("target", "default") + self.overrides = kwargs.pop("overrides", {}) + self.download_static_artifacts = kwargs.pop("download_static_artifacts", True) + self.download_additional_files = kwargs.pop("download_additional_files", []) + self.upload_static_artifacts = kwargs.pop("upload_static_artifacts", False) + self.upload_additional_files = kwargs.pop("upload_additional_files", []) + self.upload_tag = kwargs.pop("upload_tag", None) + base_url_internal = os.getenv("DATACOVES__UPLOAD_MANIFEST_URL") + self.base_url = base_url_internal + self.environment_slug = os.getenv("DATACOVES__ENVIRONMENT_SLUG") + self.upload_run_results = kwargs.pop("upload_run_results", True) + self.upload_sources_json = kwargs.pop("upload_sources_json", True) + self.download_run_results = kwargs.pop("download_run_results", False) + self.download_sources_json = kwargs.pop("download_sources_json", False) + self.dbt_api_enabled = kwargs.pop("dbt_api_enabled", False) + + self.static_artifacts = { # {filepath: tag} + "target/graph_summary.json": "latest", + "target/graph.gpickle": "latest", + "target/partial_parse.msgpack": "latest", + "target/semantic_manifest.json": "latest", + } + + super().__init__( + python_callable=python_callable, + op_args=op_args, + op_kwargs=op_kwargs, + bash_command=NOTSET, + multiple_outputs=False, + **kwargs, + ) + + def _str_to_bool(self, s: str) -> bool: + return s.lower() in ("true", "1", "yes", "y") + + def _copy_readonly_repo(self): + # Copy Datacoves' readonly repo to a known destination + # and set Python and Airflow to use that path + readonly_repo = os.environ["DATACOVES__REPO_PATH_RO"] + destination = os.environ["DATACOVES__REPO_PATH"] + subprocess.run(["mkdir", "-p", destination], check=True) + subprocess.run(["cp", "-rfT", f"{readonly_repo}/", destination], check=False) + # Add destination to the Python path + os.environ["PYTHONPATH"] = f"{destination}:{os.environ.get('PYTHONPATH')}" + sys.path.append(destination) + if self.cwd: + self.cwd = f"{destination}/{self.cwd}" + else: + self.cwd = destination + + def _download_latest_file(self, file): + file_path = Path(self.cwd, file) + self.dbt_api.download_latest_file_by_filename( + filename=file_path.name, + destination=file_path, + ) + + def _download_tagged_file(self, file): + file_path = Path(self.cwd, "logs", file) + self.dbt_api.download_file_by_tag( + f"{self.upload_tag}-{file_path.stem}", file_path + ) + + def _download_dbt_static_files(self): + files_to_download = self.static_artifacts.copy() + files_to_download.update( + {file: None for file in self.download_additional_files} + ) + for file, tag in files_to_download.items(): + if tag == "latest": + self._download_latest_file(file) + else: + self._download_tagged_file(file) + + def _upload_file(self, file): + file_path = Path(self.cwd, "target", file) + if file_path.exists(): + file_payload = {} + with open(file_path, "rb") as f: + file_tag = f"{self.upload_tag}-{file_path.stem}" + file_payload["files[0][tag]"] = ( + None, + file_tag, + ) + file_payload["files[0][file]"] = ( + file_path.name, + f, + ) + self.dbt_api.upload_files(file_payload) + else: + print(f"File {file_path} not found") + + def _upload_sources_json(self): + pass + + def _upload_results(self): + files_to_upload = self.static_artifacts.copy() + files_to_upload.update({file: None for file in self.upload_additional_files}) + files_payload = {} + tags_to_delete = [] + for 
index, (file, tag) in enumerate(files_to_upload.items()): + file_path = Path(self.cwd, file) + if file_path.exists(): + if tag: + file_tag = f"{tag}-{file_path.stem}" + files_payload[f"files[{index}][tag]"] = ( + None, + file_tag, + ) + else: + file_tag = f"{self.upload_tag}-{file_path.stem}" + files_payload[f"files[{index}][tag]"] = ( + None, + file_tag, + ) + files_payload[f"files[{index}][file]"] = ( + file_path.name, + open(file_path, "rb"), + ) + tags_to_delete.append(file_tag) + else: + print(f"File {file_path} not found") + self.dbt_api.upload_files(files_payload) + + def _upload_latest_manifest(self): + with open(f"{self.cwd}/target/manifest.json", "rb") as f: + files_payload = {"file": f} + return self.dbt_api.upload_latest_manifest( + env_slug=self.environment_slug, + run_id=self.upload_tag, + files_payload=files_payload, + ) + + def execute(self, context: Context) -> Any: + if self.airflow_connection_name: + profile_path = Path("/tmp/profiles.yml") + + with open(str(profile_path), "wt") as output: + output.write( + GenerateDbtProfiles.generate( + self.airflow_connection_name, self.target, self.overrides + ) + ) + + if self.env is None or self.env == {}: + self.append_env = True + self.env = {} + + self.env["DBT_PROFILES_DIR"] = "/tmp" + + context_merge(context, self.op_kwargs) + + # If the user didn't pass an upload tag, set it to Airflow Context's dag run id + if not self.upload_tag: + self.upload_tag = context["dag_run"].run_id + + # Copy the readonly repo to tmp. This must be done before calling the super().execute + self._copy_readonly_repo() + kwargs = determine_kwargs(self.python_callable, self.op_args, context) + if self.dbt_api_enabled: + self.dbt_api = DatacovesDbtAPI() + if self.download_static_artifacts: + self.dbt_api.download_latest_manifest( + trimmed=False, destination=f"{self.cwd}/target/manifest.json" + ) + self._download_dbt_static_files() + if self.download_run_results: + self._download_tagged_file("run_results.json") + if self.download_sources_json: + self._download_tagged_file("sources.json") + if "expected_files" in kwargs: + all_found = True + expected_files = kwargs.pop("expected_files") + if not isinstance(expected_files, list): + raise TypeError( + "The expected_files parameter must be a list of strings." + ) + for file in expected_files: + logs_file_path = Path(self.cwd, "logs", file) + if not logs_file_path.exists(): + print(f"Expected file {logs_file_path} not found") + all_found = False + break + else: + print(f"Expected file {logs_file_path} found") + kwargs["expected_files"] = all_found + + bash_command = self.python_callable(*self.op_args, **kwargs) + if not isinstance(bash_command, str) or bash_command.strip() == "": + raise TypeError( + "The returned value from the TaskFlow callable must be a non-empty string." + ) + # For some reason, if I don't specify both these parameters, python + # claims I'm missing positional arguments. + self.bash_command = self._get_full_command( + command=bash_command, virtualenv=None + ) + + try: + return super(DatacovesDbtOperator, self).execute( + context, perform_copy=False + ) + except Exception as e: + raise e + finally: + if self.dbt_api_enabled: + if self.upload_static_artifacts: + self._upload_results() + if self.upload_run_results: + self._upload_file("run_results.json") + if self.upload_sources_json: + self._upload_file("sources.json") + + +def datacoves_dbt_task( + python_callable: Callable | None = None, + **kwargs, +) -> TaskDecorator: + """ + Wrap a function into a BashOperator. 
+ + Accepts kwargs for operator kwargs. Can be reused in a single DAG. This function is only used only used + during type checking or auto-completion. + + :param python_callable: Function to decorate. + + :meta private: + """ + return task_decorator_factory( + python_callable=python_callable, + decorated_operator_class=_DatacovesDbtDecoratedOperator, + **kwargs, + ) diff --git a/src/common/providers/datacoves/datacoves_airflow_provider/provider_info.py b/src/common/providers/datacoves/datacoves_airflow_provider/provider_info.py new file mode 100644 index 00000000..64e60880 --- /dev/null +++ b/src/common/providers/datacoves/datacoves_airflow_provider/provider_info.py @@ -0,0 +1,29 @@ +def get_provider_info(): + return { + "package-name": "datacoves-airflow-provider", + "name": "Datacoves Airflow Provider", + "description": "An Airflow provider for Datacoves", + "versions": ["0.0.2"], + "hook-class-names": [], + "extra-links": [], + "operators": [], + "sensors": [], + "secrets-backends": [], + "transfers": [], + "hooks": [], + "executors": [], + "task-decorators": [ + { + "name": "datacoves_bash", + "class-name": "datacoves_airflow_provider.decorators.bash.datacoves_bash_task", + }, + { + "name": "datacoves_dbt", + "class-name": "datacoves_airflow_provider.decorators.dbt.datacoves_dbt_task", + }, + { + "name": "datacoves_airflow_db_sync", + "class-name": "datacoves_airflow_provider.decorators.data_sync.datacoves_data_sync_task", + }, + ], + } diff --git a/src/common/providers/datacoves/datacoves_airflow_provider/testing/__init__.py b/src/common/providers/datacoves/datacoves_airflow_provider/testing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/common/providers/datacoves/datacoves_airflow_provider/testing/custom_reporter.py b/src/common/providers/datacoves/datacoves_airflow_provider/testing/custom_reporter.py new file mode 100644 index 00000000..deddc3cb --- /dev/null +++ b/src/common/providers/datacoves/datacoves_airflow_provider/testing/custom_reporter.py @@ -0,0 +1,75 @@ +import sys +from pathlib import Path + +from _pytest.config import Config +from _pytest.reports import TestReport + + +class CustomReporter: + def __init__(self, config: Config): + self.config = config + self.stats = {} + self.warnings = [] + + def pytest_runtest_logreport(self, report: TestReport): + if report.when == "call": + self.stats.setdefault(report.outcome, []).append(report) + + def pytest_warning_recorded(self, warning_message): + self.warnings.append(warning_message) + + def pytest_sessionfinish(self): + try: + output_file = self.config.getoption("output_file") + if not output_file: + return + + Path(output_file).parent.mkdir(parents=True, exist_ok=True) + + with open(output_file, "w", encoding="utf-8") as f: + # Only add warnings header if there are warnings + if self.warnings: + f.write("⚠️ **Test Warnings Detected:**\n\n") + + f.write("```\n") + + # Simple test summary + total = sum(len(v) for v in self.stats.values()) + passed = len(self.stats.get("passed", [])) + failed = len(self.stats.get("failed", [])) + skipped = len(self.stats.get("skipped", [])) + + f.write("Tests Summary:\n") + f.write(f"Total Tests: {total}\n") + f.write(f"Passed: {passed}\n") + if failed: + f.write(f"Failed: {failed}\n") + if skipped: + f.write(f"Skipped: {skipped}\n") + + # Only show warnings if they exist + if self.warnings: + f.write("\nWarnings:\n") + for warning in self.warnings: + f.write(f"- {str(warning.message)}\n") + + f.write("```\n") + + except Exception as e: + print(f"Error writing 
report: {e}", file=sys.stderr) + + +def pytest_configure(config): + output_file = config.getoption("output_file", None) + if output_file: + reporter = CustomReporter(config) + config.pluginmanager.register(reporter) + + +def pytest_addoption(parser): + parser.addoption( + "--output-file", + action="store", + default=None, + help="path to output file for custom formatted results", + ) diff --git a/src/common/providers/datacoves/datacoves_airflow_provider/utils/__init__.py b/src/common/providers/datacoves/datacoves_airflow_provider/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/common/providers/datacoves/datacoves_airflow_provider/utils/dbt_api.py b/src/common/providers/datacoves/datacoves_airflow_provider/utils/dbt_api.py new file mode 100644 index 00000000..6109feeb --- /dev/null +++ b/src/common/providers/datacoves/datacoves_airflow_provider/utils/dbt_api.py @@ -0,0 +1,189 @@ +import json +import os +import urllib.parse +from pathlib import Path + +import requests + +from airflow.models import Variable + + +class DatacovesDbtAPI: + def __init__(self): + self.user_token = os.getenv("DATACOVES__SECRETS_TOKEN") + self.airflow_token = os.getenv("DATACOVES__UPLOAD_MANIFEST_TOKEN") + base_url_internal = os.getenv("DATACOVES__DBT_API_URL") + base_url_external = os.getenv("DATACOVES__EXTERNAL_URL") + use_external_url = self._str_to_bool( + os.getenv("DATACOVES__USE_EXTERNAL_URL", "false") + ) + self.base_url = base_url_external if use_external_url else base_url_internal + self.environment_slug = os.getenv("DATACOVES__ENVIRONMENT_SLUG") + self.project_slug = os.getenv("DATACOVES__PROJECT_SLUG") + self.download_successful = False + try: + project_key = Variable.get("datacoves-dbt-api-secret") + except KeyError: + raise Exception( + "datacoves-dbt-api-secret not found in Airflow Variables." + "Make sure your Project is configured correctly." 
+ ) + self.project_headers = { + "Authorization": f"Bearer {project_key}", + "Accept": "application/json", + } + + def _str_to_bool(self, s: str) -> bool: + return s.lower() in ("true", "1", "yes", "y") + + def get_endpoint(self, endpoint: str) -> str: + return f"{self.base_url}/{endpoint}" + + def api_call( + self, + method: str, + endpoint: str, + headers: dict, + data: dict = None, + files: dict = None, + ): + try: + url = self.get_endpoint(endpoint) + response = requests.request( + method, url, headers=headers, json=data, files=files + ) + # response.raise_for_status() + return response + except Exception: + response_errors = response.json().get("errors") + raise Exception(response_errors) + + def upload_latest_manifest( + self, + env_slug: str, + run_id: str, + dag_id: str, + files_payload, + ): + data = { + "environment_slug": env_slug, + "run_id": run_id, + "dag_id": dag_id, + } + res = self.api_call( + "POST", + "api/internal/manifests", + headers=self.project_headers, + files=files_payload, + data=data, + ) + if res.ok: + print("Manifest uploaded successfully") + elif res.status_code == 404: + print("Manifest not found") + else: + errors = res.json().get("errors") + print(f"Error uploading manifest: {errors}") + + def download_latest_manifest( + self, + trimmed=True, + destination=f"{os.getenv('DATACOVES__REPO_PATH')}/transform/target/manifest.json", + ): + destination_path = Path(destination) + destination_path.parent.mkdir(parents=True, exist_ok=True) + query_str = f"trimmed={str(trimmed).lower()}" + res = self.api_call( + "GET", + f"api/internal/projects/{self.project_slug}/latest-manifest?{query_str}", + headers=self.project_headers, + ) + if res.ok: + manifest = res.json() + with open(destination_path, "w") as f: + json.dump(manifest, f, indent=4) + print(f"Downloaded manifest to {destination_path.absolute()}") + else: + errors = res.json().get("errors") + print(f"Error downloading manifest: {errors}") + + def download_file_by_tag(self, tag: str, destination: str): + destination_path = Path(destination) + destination_path.parent.mkdir(parents=True, exist_ok=True) + escape_tag = urllib.parse.quote(tag) + params = f"tag={escape_tag}" + res = self.api_call( + "GET", + f"api/internal/environments/{self.environment_slug}/files?{params}", + headers=self.project_headers, + ) + print(f"Downloading file with tag {tag} to {destination}") + if res.ok: + try: + content = res.json().get("data", {}).get("contents", "") + if type(content) is dict: + content = json.dumps(content, indent=4) + except requests.exceptions.JSONDecodeError: + content = res.text + with open(destination, "w") as f: + f.write(content) + print(f"Downloaded {destination}") + self.download_successful = True + else: + errors = res.json().get("errors") + print(f"Error downloading {destination}: {errors}") + + def download_latest_file_by_filename( + self, + filename: str, + destination: str, + ): + # Call dbt_api /files with filename=filename + destination_path = Path(destination) + destination_path.parent.mkdir(parents=True, exist_ok=True) + params = f"filename={filename}" + res = self.api_call( + "GET", + f"api/internal/environments/{self.environment_slug}/files?{params}", + headers=self.project_headers, + ) + if res.ok: + try: + content = res.json().get("data", {}).get("contents", "") + if type(content) is dict: + content = json.dumps(content, indent=4) + except requests.exceptions.JSONDecodeError: + content = res.text + with open(destination, "w") as f: + f.write(content) + print(f"Downloaded {destination}") + 
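+ # Flag the successful fetch so callers can tell a written artifact apart
+ # from a failed download.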
self.download_successful = True + else: + errors = res.json().get("errors") + print(f"Error downloading {destination}: {errors}") + + def upload_files(self, files: dict): + res = self.api_call( + "POST", + f"api/internal/environments/{self.environment_slug}/files", + headers=self.project_headers, + files=files, + ) + if res.ok: + file_values = [value[0] for key, value in files.items() if "file" in key] + print(f"Files uploaded successfully: {file_values}") + else: + errors = res.json().get("errors") + print(f"Error uploading files: {errors}") + + def delete_files(self, tags_to_delete: list): + for tag in tags_to_delete: + self.delete_file_by_tag(tag) + + def delete_file_by_tag(self, tag: str): + self.api_call( + "DELETE", + f"api/internal/environments/{self.environment_slug}/files", + headers=self.project_headers, + data={"tag": tag}, + ) diff --git a/src/common/providers/datacoves/setup.py b/src/common/providers/datacoves/setup.py new file mode 100644 index 00000000..e87acacc --- /dev/null +++ b/src/common/providers/datacoves/setup.py @@ -0,0 +1,22 @@ +from setuptools import find_packages, setup + +setup( + name="datacoves-airflow-provider", + version="0.0.1", + description="An Airflow provider for Datacoves", + packages=find_packages(), + install_requires=[ + "apache-airflow>=2.10", + ], + entry_points={ + "apache_airflow_provider": [ + "provider_info = datacoves_airflow_provider.provider_info:get_provider_info" + ] + }, + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + "Operating System :: OS Independent", + ], + python_requires=">=3.8", +) diff --git a/src/common/providers/providers.txt b/src/common/providers/providers.txt new file mode 100644 index 00000000..c57f673b --- /dev/null +++ b/src/common/providers/providers.txt @@ -0,0 +1,29 @@ +apache-airflow-providers-snowflake==5.8.0 +acryl-datahub-airflow-plugin[plugin-v2]==0.14.1.9 +airflow-provider-fivetran-async==2.0.2 +apache-airflow-providers-airbyte==3.6.0 +apache-airflow-providers-microsoft-azure==11.0.0 +apache-airflow-providers-amazon[s3fs]==9.0.0 +apache-airflow-providers-celery==3.8.3 +apache-airflow-providers-cncf-kubernetes==10.3.1 +apache-airflow-providers-databricks==6.12.0 +apache-airflow-providers-docker==3.14.0 +apache-airflow-providers-elasticsearch==5.5.2 +apache-airflow-providers-google==10.25.0 +apache-airflow-providers-grpc==3.7.3 +apache-airflow-providers-hashicorp==3.8.0 +apache-airflow-providers-mysql==5.7.3 +apache-airflow-providers-odbc==4.8.0 +apache-airflow-providers-openlineage==1.13.0 +apache-airflow-providers-oracle==3.12.0 +apache-airflow-providers-postgres==5.13.1 +apache-airflow-providers-redis==3.8.0 +apache-airflow-providers-sendgrid==3.6.0 +apache-airflow-providers-sftp==4.11.1 +apache-airflow-providers-slack==8.9.1 +apache-airflow-providers-ssh==3.14.0 +apache-airflow-providers-tableau==4.6.1 +packaging>=23.2 +python-jose==3.3.0 +pyyaml>=6.0 +pytest==8.3.4 diff --git a/src/common/requirements/base.txt b/src/common/requirements/base.txt new file mode 100644 index 00000000..1d92ab35 --- /dev/null +++ b/src/common/requirements/base.txt @@ -0,0 +1,13 @@ +acryl-datahub==0.15.0.5 +dbt-core~=1.9.0 +dbt-coves==1.9.5 +git+https://gitlab.com/datacoves/permifrost.git@v0.15.6 +pre-commit==3.7.1 +PyYAML==6.0.2 +shandy-sqlfmt[jinjafmt]==0.26.0 +sqlfluff==3.1.1 +sqlfluff-templater-dbt==3.1.1 +rich==14.0.0 +kubernetes==31.0.0 +uv==0.4.30 +ruff==0.8.3 \ No newline at end of file diff --git a/src/common/requirements/dbt-bigquery.txt 
b/src/common/requirements/dbt-bigquery.txt new file mode 100644 index 00000000..9b738900 --- /dev/null +++ b/src/common/requirements/dbt-bigquery.txt @@ -0,0 +1,2 @@ +-r base.txt +dbt-bigquery~=1.9.0 \ No newline at end of file diff --git a/src/common/requirements/dbt-databricks.txt b/src/common/requirements/dbt-databricks.txt new file mode 100644 index 00000000..b660e634 --- /dev/null +++ b/src/common/requirements/dbt-databricks.txt @@ -0,0 +1,2 @@ +-r base.txt +dbt-databricks~=1.9.0 \ No newline at end of file diff --git a/src/common/requirements/dbt-redshift.txt b/src/common/requirements/dbt-redshift.txt new file mode 100644 index 00000000..a93822c9 --- /dev/null +++ b/src/common/requirements/dbt-redshift.txt @@ -0,0 +1,3 @@ +-r base.txt +psycopg2-binary==2.9.9 +dbt-redshift~=1.9.0 \ No newline at end of file diff --git a/src/common/requirements/dbt-snowflake.txt b/src/common/requirements/dbt-snowflake.txt new file mode 100644 index 00000000..a6433273 --- /dev/null +++ b/src/common/requirements/dbt-snowflake.txt @@ -0,0 +1,6 @@ +-r base.txt +dbt-snowflake~=1.9.0 +snowflake-connector-python[pandas]==3.14.0 +snowflake-snowpark-python==1.25.0 +snowflake-cli==3.7.1 +certifi==2025.1.31 diff --git a/src/common/set_adapters_app.sh b/src/common/set_adapters_app.sh new file mode 100755 index 00000000..22e3539e --- /dev/null +++ b/src/common/set_adapters_app.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +# First argument is adapter path, could be 'all', 'bigquery', 'snowflake', or 'databricks' +# second argument is path to python libs, when specified, adapter is mandatory +# third argument used to skip validation, could be just "skip-validation" + +set -e + +ADAPTER=${1:-"all"} +LOCATION=${2:-"/opt/datacoves/virtualenvs/main/lib"} +VALIDATE=${3:-"true"} + +APP_NAME="${DATACOVES__DB_APP_NAME:-datacoves}" +PATTERN='"dbt"' +REPLACEMENT='"'$APP_NAME'"' +SUBPATH="**" + +if [ $ADAPTER != "all" ]; then + SUBPATH=$ADAPTER +fi + +if [ $ADAPTER == "bigquery" ]; then + PATTERN='"dbt-bigquery-{dbt_version.version}"' +fi + +if [ $ADAPTER == "databricks" ]; then + PATTERN='"dbt-databricks/{__version__}"' +fi + +if [ $ADAPTER == "spark" ]; then + PATTERN='"dbt-labs-dbt-spark/{dbt_spark_version}' + REPLACEMENT='"'$APP_NAME'' +fi + +OCCURRENCES=$(find $LOCATION -iwholename "**/adapters/$SUBPATH/connections.py" -type f -exec grep -l ''$PATTERN'' {} \; | wc -l) + +if [ $VALIDATE == "true" ] && [ $ADAPTER != "all" ] && [ $OCCURRENCES -eq 0 ]; then + echo "No occurrences of '$PATTERN' found in adapters/$SUBPATH/connections.py" + exit 1 +fi + +if [ $OCCURRENCES -gt 0 ]; then + # Replacing default app name in python libraries. 
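+ # sed swaps the hard-coded application-name string matched by $PATTERN for
+ # $REPLACEMENT (derived from DATACOVES__DB_APP_NAME) in every matching
+ # adapters/$SUBPATH/connections.py; '@' is used as the sed delimiter so the
+ # '/' inside the databricks and spark patterns does not end the expression.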
+ find $LOCATION -iwholename "**/adapters/$SUBPATH/connections.py" -type f -print0 | xargs -0 sed -i 's@'$PATTERN'@'$REPLACEMENT'@g' +fi + +echo "Replaced $OCCURRENCES occurrences of '$PATTERN' in adapters/$SUBPATH/connections.py" diff --git a/src/core/admission-controller-bootstrap/Dockerfile b/src/core/admission-controller-bootstrap/Dockerfile new file mode 100644 index 00000000..cea61004 --- /dev/null +++ b/src/core/admission-controller-bootstrap/Dockerfile @@ -0,0 +1,24 @@ +FROM alpine:3.17 + +LABEL com.datacoves.from=alpine:3.17 +LABEL com.datacoves.library.admission-controller-bootstrap.kubectl=1.25.1 + +ARG APP_USER=container_user +ARG KUBECTL_VERSION=v1.25.1 + +RUN adduser -h /home/user -s /bin/sh -D -u 1000 ${APP_USER} ${APP_USER} \ + && apk --no-cache update \ + && apk --no-cache add openssl curl + +# Installing kubectl +RUN curl -L "https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl" \ + -o /usr/local/bin/kubectl && \ + chmod +x /usr/local/bin/kubectl + +COPY --chown=${APP_USER}:${APP_USER} ./entrypoint.sh / +USER $APP_USER +WORKDIR /home/user +VOLUME ["/home/user"] +ENTRYPOINT ["/entrypoint.sh"] + +CMD ["bootstrap"] diff --git a/src/core/admission-controller-bootstrap/entrypoint.sh b/src/core/admission-controller-bootstrap/entrypoint.sh new file mode 100755 index 00000000..9511c383 --- /dev/null +++ b/src/core/admission-controller-bootstrap/entrypoint.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env sh +set -e +action=$1 +shift + +ADMISSION_SERVICE_WITH_NAMESPACE="${ADMISSION_SERVICE}.${ADMISSION_NAMESPACE}" +ADMISSION_SERVICE_FULL="${ADMISSION_SERVICE}.${ADMISSION_NAMESPACE}.svc" + +generate_ssl() { + if [ -z "${ADMISSION_SERVICE}" ] || [ -z "${ADMISSION_NAMESPACE}" ]; then + echo 'You need to define ADMISSION_SECRET and ADMISSION_NAMESPACE environment variables' + exit 1 + else + echo "Generating cert" + mkdir -p /tmp/cert + cd /tmp/cert && \ + openssl req -new -x509 -sha256 -newkey rsa:2048 -keyout webhook.key -out webhook.crt -days 1024 -nodes -addext "subjectAltName=DNS.1:${ADMISSION_SERVICE}, DNS.2:${ADMISSION_SERVICE_WITH_NAMESPACE}, DNS.3:${ADMISSION_SERVICE_FULL}" -subj "/C=US/ST=St/L=l/O=Datacoves/CN=datacoves.com" + cp /tmp/cert/webhook.key /home/user/webhook.key + cp /tmp/cert/webhook.crt /home/user/webhook.crt + rm -rf /tmp/cert + fi +} + +generate_secret() { + if [ -z "${ADMISSION_SECRET}" ] || [ -z "${ADMISSION_NAMESPACE}" ]; then + echo 'You need to define ADMISSION_SECRET and ADMISSION_NAMESPACE environment variables' + exit 1 + else + if ! kubectl get secret "${ADMISSION_SECRET}" --namespace="${ADMISSION_NAMESPACE}"; + then + echo "Generating secret template" + kubectl create secret generic "${ADMISSION_SECRET}" \ + --from-file=webhook.crt=/home/user/webhook.crt \ + --from-file=webhook.key=/home/user/webhook.key \ + --namespace="${ADMISSION_NAMESPACE}" --dry-run=client -o yaml > /home/user/secret.yaml + echo "Deleting old secrets" + kubectl delete secret "${ADMISSION_SECRET}" --ignore-not-found=true --namespace="${ADMISSION_NAMESPACE}" --grace-period=10 + echo "Creating new secret in cluster" + kubectl create -f /home/user/secret.yaml + else + echo "Secret already exists" + fi + fi +} + +bootstrap() { + generate_ssl + generate_secret +} + +case ${action} in +bootstrap) + bootstrap + exit 0 + ;; +*) + echo "Unknown action: \"${action}\"." 
+ help + ;; +esac + +exec "${action}" "$@" diff --git a/src/core/admission-controller/Dockerfile b/src/core/admission-controller/Dockerfile new file mode 100644 index 00000000..7538167f --- /dev/null +++ b/src/core/admission-controller/Dockerfile @@ -0,0 +1,12 @@ +FROM python:3.11-alpine +LABEL com.datacoves.from=python:3.11-alpine +LABEL com.datacoves.library.admission-controller.fastapi=0.86.0 +WORKDIR /src + +COPY requirements.txt /src/ + +RUN pip install -r /src/requirements.txt + +COPY api.py entrypoint.sh /src/ + +ENTRYPOINT ["/src/entrypoint.sh"] diff --git a/src/core/admission-controller/api.py b/src/core/admission-controller/api.py new file mode 100644 index 00000000..9f2dd698 --- /dev/null +++ b/src/core/admission-controller/api.py @@ -0,0 +1,38 @@ +import logging + +import httpx +from fastapi import FastAPI, Request, status +from fastapi.responses import JSONResponse +from pydantic import BaseSettings + + +class Settings(BaseSettings): + WEBHOOK_URL: str = None + + class Config: + env_file = ".env" + + +settings = Settings() +logger = logging.getLogger(__name__) +logger.setLevel(logging.INFO) +app = FastAPI() +client = httpx.AsyncClient(verify=False) + + +@app.post("/", status_code=200) +async def main(request: Request): + try: + data: dict = await request.json() + r = await client.post(settings.WEBHOOK_URL, json=data) + response = r.json() + except Exception as e: + logger.error(e) + return JSONResponse({}, status_code=status.HTTP_400_BAD_REQUEST) + + return response + + +@app.get("/health", status_code=200) +async def health(request: Request): + return {"health": True} diff --git a/src/core/admission-controller/charts/admission-controller/.helmignore b/src/core/admission-controller/charts/admission-controller/.helmignore new file mode 100644 index 00000000..0e8a0eb3 --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/src/core/admission-controller/charts/admission-controller/Chart.yaml b/src/core/admission-controller/charts/admission-controller/Chart.yaml new file mode 100644 index 00000000..c43acc4b --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: admission-controller +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. 
This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.16.0" diff --git a/src/core/admission-controller/charts/admission-controller/templates/NOTES.txt b/src/core/admission-controller/charts/admission-controller/templates/NOTES.txt new file mode 100644 index 00000000..c5b598e7 --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "admission-controller.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "admission-controller.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "admission-controller.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "admission-controller.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/src/core/admission-controller/charts/admission-controller/templates/_helpers.tpl b/src/core/admission-controller/charts/admission-controller/templates/_helpers.tpl new file mode 100644 index 00000000..a78b476e --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "admission-controller.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "admission-controller.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "admission-controller.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "admission-controller.labels" -}} +helm.sh/chart: {{ include "admission-controller.chart" . }} +{{ include "admission-controller.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "admission-controller.selectorLabels" -}} +app.kubernetes.io/name: {{ include "admission-controller.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "admission-controller.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "admission-controller.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/src/core/admission-controller/charts/admission-controller/templates/deployment.yaml b/src/core/admission-controller/charts/admission-controller/templates/deployment.yaml new file mode 100644 index 00000000..6aa0b7ad --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/deployment.yaml @@ -0,0 +1,79 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "admission-controller.fullname" . }} + labels: + {{- include "admission-controller.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "admission-controller.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "admission-controller.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "admission-controller.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + args: + - server + env: + - name: SSL_KEYFILE + value: /certs/webhook.key + - name: SSL_CERTFILE + value: /certs/webhook.crt + - name: WEBHOOK_URL + value: http://core-api-svc.core.svc/api/webhook + volumeMounts: + - name: certs-volume + readOnly: true + mountPath: "/certs" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: http + containerPort: 8000 + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: http + scheme: HTTPS + httpHeaders: + - name: Accept + value: application/json + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumes: + - name: certs-volume + secret: + secretName: {{ include "admission-controller.fullname" . }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + diff --git a/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator.yaml b/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator.yaml new file mode 100644 index 00000000..13a1c793 --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator.yaml @@ -0,0 +1,31 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ .Release.Name }}-webhook-cert-setup + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + helm.sh/hook-weight: "1" +spec: + template: + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ .Release.Name }}-webhook-cert-sa + containers: + - name: webhook-cert-setup + image: "{{ .Values.job.image.repository }}:{{ .Values.job.image.tag | default .Chart.AppVersion }}" + env: + - name: ADMISSION_SERVICE + value: {{ include "admission-controller.fullname" . }} + - name: ADMISSION_SECRET + value: {{ include "admission-controller.fullname" . 
}} + - name: ADMISSION_NAMESPACE + value: {{ .Release.Namespace }} + args: + - bootstrap + imagePullPolicy: Always + restartPolicy: Never + backoffLimit: 0 \ No newline at end of file diff --git a/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator_resources/clusterrole.yaml b/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator_resources/clusterrole.yaml new file mode 100644 index 00000000..b4a7f319 --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator_resources/clusterrole.yaml @@ -0,0 +1,57 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ .Release.Name }}-webhook-cert-sa-cluster-role + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded +rules: + - apiGroups: + - admissionregistration.k8s.io + resources: + - mutatingwebhookconfigurations + verbs: + - get + - create + - patch + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests + verbs: + - get + - create + - delete + - list + - watch + - apiGroups: + - certificates.k8s.io + resources: + - certificatesigningrequests/approval + verbs: + - update + - apiGroups: + - certificates.k8s.io + resources: + - signers + resourceNames: + - kubernetes.io/* # example.com/* can be used to authorize for all signers in the 'example.com' domain + verbs: + - approve + - apiGroups: + - "" + resources: + - secrets + verbs: + - get + - create + - delete + - list + - patch + - update + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get \ No newline at end of file diff --git a/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator_resources/clusterrolebinding.yaml b/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator_resources/clusterrolebinding.yaml new file mode 100644 index 00000000..d1fc2a93 --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator_resources/clusterrolebinding.yaml @@ -0,0 +1,15 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ .Release.Name }}-webhook-cert-sa-role-binding + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ .Release.Name }}-webhook-cert-sa-cluster-role +subjects: + - kind: ServiceAccount + name: {{ .Release.Name }}-webhook-cert-sa + namespace: {{ .Release.Namespace }} \ No newline at end of file diff --git a/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator_resources/service_account.yaml b/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator_resources/service_account.yaml new file mode 100644 index 00000000..a0e91f17 --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/hooks/01_certificator_resources/service_account.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ .Release.Name }}-webhook-cert-sa + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + app: webhook-cert-sa \ No newline at end of file diff --git a/src/core/admission-controller/charts/admission-controller/templates/hpa.yaml 
b/src/core/admission-controller/charts/admission-controller/templates/hpa.yaml new file mode 100644 index 00000000..46dd0a45 --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "admission-controller.fullname" . }} + labels: + {{- include "admission-controller.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "admission-controller.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/src/core/admission-controller/charts/admission-controller/templates/service.yaml b/src/core/admission-controller/charts/admission-controller/templates/service.yaml new file mode 100644 index 00000000..03eb089c --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "admission-controller.fullname" . }} + labels: + {{- include "admission-controller.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http # 8000 + protocol: TCP + name: http + selector: + {{- include "admission-controller.selectorLabels" . | nindent 4 }} + diff --git a/src/core/admission-controller/charts/admission-controller/templates/serviceaccount.yaml b/src/core/admission-controller/charts/admission-controller/templates/serviceaccount.yaml new file mode 100644 index 00000000..78dd4905 --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "admission-controller.serviceAccountName" . }} + labels: + {{- include "admission-controller.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/src/core/admission-controller/charts/admission-controller/templates/tests/test-connection.yaml b/src/core/admission-controller/charts/admission-controller/templates/tests/test-connection.yaml new file mode 100644 index 00000000..c5f3ec09 --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "admission-controller.fullname" . }}-test-connection" + labels: + {{- include "admission-controller.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "admission-controller.fullname" . 
}}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/src/core/admission-controller/charts/admission-controller/values.yaml b/src/core/admission-controller/charts/admission-controller/values.yaml new file mode 100644 index 00000000..a1b4a474 --- /dev/null +++ b/src/core/admission-controller/charts/admission-controller/values.yaml @@ -0,0 +1,86 @@ +# Default values for admission-controller. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: datacovesprivate/core-admission-controller + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "latest" +job: + image: + repository: datacovesprivate/core-admission-controller-bootstrap + tag: "latest" +imagePullSecrets: + - name: docker-config-datacovesprivate +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/src/core/admission-controller/entrypoint.sh b/src/core/admission-controller/entrypoint.sh new file mode 100755 index 00000000..61d6fbe3 --- /dev/null +++ b/src/core/admission-controller/entrypoint.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env sh +set -e +action=$1 +shift + +help () { + echo " + Container Available Commands + help : show this help + server : run uvicorn server + " +} + +case ${action} in +help) + help + exit 0 + ;; +server) + exec "uvicorn" "api:app" "--host" "${HOST:-0.0.0.0}" "--port" "${PORT:-8000}" "--ssl-keyfile=${SSL_KEYFILE}" "--ssl-certfile=${SSL_CERTFILE}" + ;; +*) + echo "Unknown action: \"${action}\"." 
+ help + ;; +esac + +exec "${action}" "$@" diff --git a/src/core/admission-controller/requirements.txt b/src/core/admission-controller/requirements.txt new file mode 100644 index 00000000..e0c9e414 --- /dev/null +++ b/src/core/admission-controller/requirements.txt @@ -0,0 +1,3 @@ +fastapi==0.86.0 +httpx==0.23.0 +uvicorn[standard]==0.19.0 diff --git a/src/core/api/Dockerfile b/src/core/api/Dockerfile new file mode 100644 index 00000000..ed4e5064 --- /dev/null +++ b/src/core/api/Dockerfile @@ -0,0 +1,76 @@ +FROM python:3.12-slim-bullseye AS local +LABEL com.datacoves.from=python:3.12 +LABEL com.datacoves.library.core-api.django=5.0.7 + +# Azure CLI needs to be installed via APT instead of via pip, because it +# installs a million packages and many of them are too old. +RUN apt-get update \ + && export DEBIAN_FRONTEND=noninteractive && \ + apt-get install -y --no-install-recommends \ + ssh \ + git \ + docker \ + postgresql-client \ + build-essential \ + python3-dev \ + libldap2-dev \ + libsasl2-dev \ + slapd \ + ldap-utils \ + azure-cli \ + gpg \ + curl \ + apt-transport-https + +# Helm support +RUN curl https://baltocdn.com/helm/signing.asc | gpg --dearmor | tee /usr/share/keyrings/helm.gpg > /dev/null && \ + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/helm.gpg] https://baltocdn.com/helm/stable/debian/ all main" | tee /etc/apt/sources.list.d/helm-stable-debian.list && \ + apt-get update && apt-get install helm + +# Configure user and group used to run uwsgi / manage.py runserver +RUN groupadd abc -g 1000 && \ + useradd abc -u 1000 -g 1000 -m -s /bin/bash + +WORKDIR /usr/src + +RUN pip install pip-tools pytest-playwright==0.6.2 pytest-reporter-html1==0.8.2 && \ + playwright install --with-deps firefox + +# Copying playwright cache to abc user +RUN mv /root/.cache/ /home/abc/.cache && \ + chown -R abc:abc /home/abc/.cache + +COPY requirements.txt . +RUN pip install --upgrade pip +RUN pip install -r requirements.txt + +# pip install will work only if `--user` is provided, or after activating a virtualenv +ENV PIP_USER=false + +ENV SNOWFLAKE_VIRTUALENV=/opt/datacoves/virtualenvs/snowflake +RUN python -m venv "${SNOWFLAKE_VIRTUALENV}" +COPY requirements-snowflake.txt . +RUN ${SNOWFLAKE_VIRTUALENV}/bin/pip install -r requirements-snowflake.txt + +ENV REDSHIFT_VIRTUALENV=/opt/datacoves/virtualenvs/redshift +RUN python -m venv "${REDSHIFT_VIRTUALENV}" +COPY requirements-redshift.txt . +RUN ${REDSHIFT_VIRTUALENV}/bin/pip install -r requirements-redshift.txt + +ENV DATABRICKS_VIRTUALENV=/opt/datacoves/virtualenvs/databricks +RUN python -m venv "${DATABRICKS_VIRTUALENV}" +COPY requirements-databricks.txt . +RUN ${DATABRICKS_VIRTUALENV}/bin/pip install -r requirements-databricks.txt + +ENV BIGQUERY_VIRTUALENV=/opt/datacoves/virtualenvs/bigquery +RUN python -m venv "${BIGQUERY_VIRTUALENV}" +COPY requirements-bigquery.txt . +RUN ${BIGQUERY_VIRTUALENV}/bin/pip install -r requirements-bigquery.txt +FROM local AS production + +COPY uwsgi.yaml . 
+COPY app /usr/src/app + +WORKDIR /usr/src/app + +ENTRYPOINT ["./run.sh"] diff --git a/src/core/api/app/.gitignore b/src/core/api/app/.gitignore new file mode 100644 index 00000000..4faf2d1c --- /dev/null +++ b/src/core/api/app/.gitignore @@ -0,0 +1,5 @@ +.pytest_cache +.links +.screenshots +test-results/output +integration_tests/output diff --git a/src/core/api/app/billing/__init__.py b/src/core/api/app/billing/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/billing/admin.py b/src/core/api/app/billing/admin.py new file mode 100644 index 00000000..72fbbfa8 --- /dev/null +++ b/src/core/api/app/billing/admin.py @@ -0,0 +1,109 @@ +from csvexport.actions import csvexport +from django.contrib import admin, messages +from django.db import models, transaction +from django.forms import ValidationError +from django_json_widget.widgets import JSONEditorWidget + +from datacoves.admin import BaseModelAdmin, DateFieldListFilterExtended + +from .models import Event, Plan, Product, Tally, TallyMark + + +@admin.action(description="Approve selected events") +def approve_events(modeladmin, request, queryset): + try: + with transaction.atomic(): + for event in queryset.order_by("id").all(): + event.approve(request.user) + except ValidationError as ex: + modeladmin.message_user(request, ex.message, level=messages.ERROR) + + +@admin.action(description="Ignore selected events") +def ignore_events(modeladmin, request, queryset): + try: + with transaction.atomic(): + for event in queryset.order_by("id").all(): + event.ignore(request.user) + except ValidationError as ex: + modeladmin.message_user(request, ex.message, level=messages.ERROR) + + +@admin.register(Event) +class EventAdmin(BaseModelAdmin, admin.ModelAdmin): + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + list_display = ( + "account", + "event_type", + "created_at", + "approval_status", + "approved_by", + "status", + "processed_at", + ) + list_filter = ("account", "event_type", "created_at", "approval_status", "status") + search_fields = ("event_type", "approval_status", "context") + actions = [approve_events, ignore_events] + + +@admin.register(Tally) +class TallyAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ("account", "project", "environment", "name", "period") + list_filter = ("account", "name") + search_fields = ("account__name", "name") + + +@admin.register(TallyMark) +class TallyMarkAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ( + "account", + "environment", + "tally", + "time", + "amount", + "status", + "processed_at", + ) + list_filter = ( + "tally__account", + "tally__environment", + "tally", + "status", + ("time", DateFieldListFilterExtended), + ) + search_fields = ( + "tally__account__name", + "error_details", + "tally__environment__slug", + ) + actions = [csvexport] + + +@admin.register(Plan) +class PlanAdmin(BaseModelAdmin, admin.ModelAdmin): + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + list_display = ("name", "slug", "billing_period") + list_filter = ("billing_period",) + search_fields = ("name",) + readonly_fields = ("slug",) + + +@admin.register(Product) +class ProductAdmin(BaseModelAdmin, admin.ModelAdmin): + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + list_display = ( + "id", + "name", + "description", + "tally_name", + "service_name", + "charges_per_seat", + ) + search_fields = ("name", "description") + readonly_fields = ("id", "name", "description", "stripe_data") 
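Editor's note on the admin actions in billing/admin.py above: together with billing/manager.py further down, they form the billing event lifecycle visible in this diff: events are created pending approval, an operator approves or ignores them from the Django admin, and only events past approval are later pushed to Stripe by inform_billing_events(). Because the whole queryset is wrapped in transaction.atomic(), a batch approval is all-or-nothing. Below is a minimal illustrative sketch of that pattern only, assuming a hypothetical event object whose approve() may raise ValidationError; the names are illustrative and not the project's exact API.

from django.core.exceptions import ValidationError
from django.db import transaction


def approve_all_or_nothing(queryset, user):
    """Approve every event in the queryset, or none of them (sketch only)."""
    try:
        with transaction.atomic():
            for event in queryset.order_by("id"):
                # approve() is assumed to validate state and may raise ValidationError
                event.approve(user)
    except ValidationError as ex:
        # atomic() rolls back every approval made earlier in the batch
        return False, ex.message
    return True, None
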
diff --git a/src/core/api/app/billing/apps.py b/src/core/api/app/billing/apps.py new file mode 100644 index 00000000..600af232 --- /dev/null +++ b/src/core/api/app/billing/apps.py @@ -0,0 +1,9 @@ +from django.apps import AppConfig + + +class BillingConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "billing" + + def ready(self): + from . import signals # noqa F401 diff --git a/src/core/api/app/billing/manager.py b/src/core/api/app/billing/manager.py new file mode 100644 index 00000000..67252b44 --- /dev/null +++ b/src/core/api/app/billing/manager.py @@ -0,0 +1,422 @@ +import logging +from datetime import datetime, timedelta + +import stripe +from billing.models import Account, Plan +from django.conf import settings +from django.utils import timezone + +from .models import Credit, Event, Tally, TallyMark + +# For now, we configure stripe globally here, once and for all. If another +# module needs to use stripe (which most shouldn't), they should also import +# this module so that these settings are applied. +stripe.api_key = settings.STRIPE_API_KEY +stripe.max_network_retries = settings.STRIPE_RETRY_TIMES + + +DAYS_UNTIL_DUE = 5 # days until invoices due + + +def create_checkout_session(account, plan_slug, variant, domain): + """ + Creates a checkout session for the given account, plan, and variant + This method can be used only when we want to charge for developer seats + """ + customer_email = account.owned_by.email + if not account.customer_id: + # create and link stripe customer to datacoves account. + customer = stripe.Customer.create(email=customer_email, name=account.slug) + account.customer_id = customer.id + account.save() + else: + customer = stripe.Customer.retrieve(id=account.customer_id) + + plan = Plan.objects.get(slug=plan_slug) + success_url = ( + f"https://{domain}/admin/billing/checkout?session_id=" + "{CHECKOUT_SESSION_ID}" + ) + cancel_url = f"https://{domain}/admin/billing/cancel" + session = stripe.checkout.Session.create( + line_items=plan.checkout_items(variant), + mode="subscription", + automatic_tax={"enabled": True}, + customer=customer, + customer_update={"address": "auto", "name": "auto"}, + subscription_data={"metadata": {"plan": plan.slug}}, + success_url=success_url, + cancel_url=cancel_url, + ) + account.plan = plan + account.variant = variant + account.settings["last_checkout_session"] = session + account.save() + + +def handle_checkout_session_completed(event): + session = event.data.object + account = Account.objects.get(customer_id=session.customer) + account.customer_id = session["customer"] + now = timezone.now() + if account.trial_ends_at and account.trial_ends_at > now: + account.trial_ends_at = now + account.save() + + +def handle_customer_subscription_created(event): + return handle_customer_subscription_updated(event) + + +# TODO: https://stripe.com/docs/billing/subscriptions/webhooks#state-changes +def handle_customer_subscription_updated(event): + subscription = event.data.object + account = Account.objects.get(customer_id=subscription.customer) + account.update_from_subscription(subscription) + + +def handle_customer_subscription_deleted(event): + subscription = event.data.object + account = Account.objects.get(customer_id=subscription.customer) + # account.plan = None + account.cancelled_subscription = account.subscription + account.subscription = {} + account.subscription_updated_at = timezone.now() + account.save() + + +def _get_si_for_tally(plan: Plan, variant: str, tally_name: str, subscription: dict): + """ 
+ Returns subscription item for tally + """ + + price = plan.tally_price(tally_name, variant) + assert price, f"Price not found for tally {tally_name}" + + service_price = plan.tally_service_price(tally_name, variant) + assert ( + service_price + ), f"Price found for tally {tally_name} but no associated service found in subscription" + + items = [si for si in subscription["items"] if si["price"]["id"] == price["id"]] + + assert ( + len(items) == 1 + ), f"No, or more than one subscription item associated to tally {tally_name}" + return items[0] + + +def report_usage_to_stripe(account_slug: str): + """ + Processes all pending tally marks for account_slug + """ + + if not settings.BILLING_ENABLED: + return + account = Account.objects.get(slug=account_slug) + plan = account.plan + subscribed_at, current_period_start = get_subscription_dates(account) + for tally in Tally.objects.filter(account=account): + tally_marks = tally.marks.filter(status=TallyMark.STATUS_PENDING) + if plan.kind == Plan.KIND_CUSTOM or not plan.informs_usage(account.variant): + tally_marks.update(status=Event.STATUS_IGNORED, processed_at=timezone.now()) + continue + try: + si = _get_si_for_tally( + plan, account.variant, tally.name, account.subscription + ) + except Exception as ex: + error_msg = f"Exception found when attempting to report usage: {str(ex)}" + logging.error(error_msg) + raise Exception(error_msg) + + for tally_mark in tally_marks: + process_tally_mark(tally_mark, subscribed_at, current_period_start, si) + + +def get_subscription_dates(account): + """ + Parses subscription and current period dates from + account.subscription + Current period should always be up-to-date from + Stripe webhooks. + """ + subscribed_at = datetime.fromtimestamp( + account.subscription["start_date"], timezone.get_default_timezone() + ) + current_period_start = datetime.fromtimestamp( + account.subscription["current_period_start"], timezone.get_default_timezone() + ) + current_period_end = datetime.fromtimestamp( + account.subscription["current_period_end"], timezone.get_default_timezone() + ) + # Make sure current period start is correct. + # If Datacoves is not in sync with Stripe notify of the error. + utcnow = datetime.now(timezone.get_default_timezone()) + assert_message = ( + f"Cannot report usage for {account.name}" + f" because database shows current period end: {current_period_end}" + f" is prior to now: {utcnow}" + ) + # if plan is 'custom', meaning 'free', this check does not apply: + if account.plan.kind != Plan.KIND_CUSTOM: + assert utcnow < current_period_end, assert_message + return subscribed_at, current_period_start + + +def process_tally_mark( + tally_mark: TallyMark, + subscribed_at: datetime, + current_period_start: datetime, + subscription_item: dict, +): + tally_mark, send_to_stripe_amount = validate_tally_mark( + tally_mark, subscribed_at, current_period_start + ) + if tally_mark.status == TallyMark.STATUS_PENDING: + process_pending_tally_mark(tally_mark, send_to_stripe_amount, subscription_item) + + +def validate_tally_mark( + tally_mark: TallyMark, subscribed_at: datetime, current_period_start: datetime +): + """ + Ignore events prior to the subscription. 
+ Update tally mark time to current period if mark + is from last period (Stripe won't accept it) + """ + send_to_stripe_amount = int(tally_mark.amount / 60) + if tally_mark.time < subscribed_at: + # If tally mark was generated before subscription actually started + tally_mark.status = Event.STATUS_IGNORED + tally_mark.processed_at = timezone.now() + tally_mark.save() + elif tally_mark.time < current_period_start: + # Usage from yesterday cannot be reported if a new period has started today. + # Report past usage with timestamp after current period started. Also update tally mark. + # Also, avoid hitting duplicate index error. + new_time = current_period_start # No time earlier than this will be accepted by stripe. + tally_mark.time = new_time + exists = ( + TallyMark.objects.filter(tally=tally_mark.tally, time=new_time).count() > 0 + ) + while exists: + new_time += timedelta(seconds=1) + tally_mark.time = new_time + exists = ( + TallyMark.objects.filter(tally=tally_mark.tally, time=new_time).count() + > 0 + ) + + return tally_mark, send_to_stripe_amount + + +def process_pending_tally_mark( + tally_mark: TallyMark, send_to_stripe_amount: int, subscription_item: dict +): + try: + if send_to_stripe_amount > 0: + # Don't send usage = 0 to Stripe. Process mark nevertheless. + stripe.SubscriptionItem.create_usage_record( + subscription_item["id"], + quantity=send_to_stripe_amount, + timestamp=tally_mark.time, + action="set", + idempotency_key=str(tally_mark.id), + ) + tally_mark.status = Event.STATUS_PROCESSED + except Exception as ex: + tally_mark.status = Event.STATUS_FAILED + tally_mark.error_details = str(ex) + logging.error(tally_mark.error_details) + finally: + tally_mark.processed_at = timezone.now() + tally_mark.save() + + +def inform_billing_events(): + if not settings.BILLING_ENABLED: + return + + for event in ( + Event.objects.filter(status=Event.STATUS_PENDING) + .exclude(approval_status=Event.APPROVAL_PENDING) + .order_by("id") + ): + event.status = Event.STATUS_PROCESSED + if event.account.is_subscribed and event.account.plan: + try: + if event.account.plan.kind == event.account.plan.KIND_STARTER: + update_starter_subscription( + event.account, event.created_at, len(event.users) + ) + elif event.account.plan.kind == event.account.plan.KIND_GROWTH: + update_growth_subscription( + event.account, + event.created_at, + len(event.users), + event.service_counts, + ) + else: + # Special plans that are not processed + event.status = Event.STATUS_IGNORED + except Exception as ex: + event.status = Event.STATUS_FAILED + event.error_details = str(ex) + else: + event.status = Event.STATUS_FAILED + event.error_details = ( + f"Missing subscription and/or plan in account {event.account}" + ) + + event.processed_at = timezone.now() + event.save() + + if event.status == Event.STATUS_FAILED: + logging.error(event.error_details) + + +def _get_subscription_item_by_price(subscription, price): + for si in subscription["items"]: + if si["price"]["id"] == price["id"]: + return si + return None + + +def _get_seats_si(account, quantity): + """Generates a line item for seats changes""" + seat_price = account.plan.seat_price(account.variant) + + si = _get_subscription_item_by_price(account.subscription, seat_price) + + new_si = {"quantity": quantity, "price": seat_price["id"]} + if si: + new_si["id"] = si["id"] + if quantity == 0: + new_si["deleted"] = True + else: + if quantity == 0: + return None + return new_si + + +def _get_prorration_date(account: Account, event_date: datetime): + current = 
event_date.replace(hour=0, minute=0) + period_start = datetime.fromtimestamp( + account.subscription["current_period_start"], timezone.get_default_timezone() + ).replace(tzinfo=timezone.get_default_timezone()) + return max(current, period_start) + + +def _get_subscription_params(account, event_date): + params = { + "proration_behavior": "create_prorations", + "collection_method": "charge_automatically", + "proration_date": _get_prorration_date(account, event_date), + } + if not account.plan.is_monthly: + params["proration_behavior"] = "always_invoice" + params["collection_method"] = "send_invoice" + params["days_until_due"] = DAYS_UNTIL_DUE + return params + + +def update_starter_subscription(account: Account, event_date: datetime, users: int): + params = _get_subscription_params(account, event_date) + stripe.Subscription.modify( + account.subscription_id, items=[_get_seats_si(account, users)], **params + ) + + +def _get_service_si(account, service, quantity): + service_price = account.plan.service_price(service, account.variant) + assert ( + service_price is not None + ), f"Missing service price for plan {account.plan} and service {service}. \ + Please verify that Product and Price exist on Stripe and have been synced to database." + si = _get_subscription_item_by_price(account.subscription, service_price) + new_si = {"quantity": quantity, "price": service_price["id"]} + if si: + new_si["id"] = si["id"] + if quantity == 0: + new_si["deleted"] = True + else: + if quantity == 0: + return None + return new_si + + +def _get_metered_si(account, service_name): + service_name = service_name.lower() + metered_price = account.plan.get_metered_price_by_service( + service_name, account.variant + ) + if metered_price: + new_si = {"price": metered_price["id"]} + si = _get_subscription_item_by_price(account.subscription, metered_price) + if si: + new_si["id"] = si["id"] + return new_si + return None + + +def update_growth_subscription( + account: Account, event_date: datetime, users: int, services +): + """ + Process a growth plan subscription update. 
+ If there is a credit, it is subtracted from the number of users and services + """ + params = _get_subscription_params(account, event_date) + credit = Credit.objects.get_credit(account) + + if credit: + users -= credit.developer_seats + services[settings.SERVICE_AIRBYTE] -= credit.airbyte_instances + services[settings.SERVICE_AIRFLOW] -= credit.airflow_instances + services[settings.SERVICE_SUPERSET] -= credit.superset_instances + services[settings.SERVICE_DATAHUB] -= credit.datahub_instances + + seats_si = _get_seats_si(account, users) + + airbyte_si, airbyte_usage_si = _get_airservice_sis( + account=account, services=services, service_name=settings.SERVICE_AIRBYTE + ) + airflow_si, airflow_usage_si = _get_airservice_sis( + account=account, services=services, service_name=settings.SERVICE_AIRFLOW + ) + superset_si = _get_service_si( + account, settings.SERVICE_SUPERSET, services[settings.SERVICE_SUPERSET] + ) + datahub_si = _get_service_si( + account, settings.SERVICE_DATAHUB, services[settings.SERVICE_DATAHUB] + ) + items = [ + seats_si, + airbyte_si, + airbyte_usage_si, + airflow_si, + airflow_usage_si, + superset_si, + datahub_si, + ] + + subscription = stripe.Subscription.modify( + account.subscription_id, + items=[item for item in items if item is not None], + **params, + ) + account.update_from_subscription(subscription) + + +def _get_airservice_sis(account: Account, services: dict, service_name: str) -> tuple: + airservice_si = _get_service_si(account, service_name, services[service_name]) + if airservice_si: + airservice_usage_si = _get_metered_si(account, service_name) + else: + airservice_usage_si = None + return airservice_si, airservice_usage_si + + +def cancel_subscription(account): + if account.is_subscribed: + stripe.Subscription.delete(account.subscription_id) diff --git a/src/core/api/app/billing/migrations/0001_initial.py b/src/core/api/app/billing/migrations/0001_initial.py new file mode 100644 index 00000000..8c9b14b7 --- /dev/null +++ b/src/core/api/app/billing/migrations/0001_initial.py @@ -0,0 +1,28 @@ +# Generated by Django 3.2.6 on 2022-06-10 20:18 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('users', '0002_user_is_service_account'), + ] + + operations = [ + migrations.CreateModel( + name='Event', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('event_type', models.CharField(choices=[('group_added', 'User added to new group'), ('group_deleted', 'User removed from group'), ('permission_added', 'New permission added to group'), ('permission_deleted', 'Permission removed from group'), ('env_created', 'Environment created'), ('env_deleted', 'Environment deleted'), ('services_changed', 'Services changed')], max_length=30, null=True)), + ('context', models.JSONField(default=dict)), + ('environments', models.JSONField(default=list)), + ('processed_at', models.DateTimeField(blank=True, null=True)), + ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.account')), + ], + ), + ] diff --git a/src/core/api/app/billing/migrations/0002_auto_20220610_2224.py b/src/core/api/app/billing/migrations/0002_auto_20220610_2224.py new file mode 100644 index 00000000..992ae3f3 --- /dev/null +++ b/src/core/api/app/billing/migrations/0002_auto_20220610_2224.py @@ -0,0 +1,24 @@ +# Generated by Django 3.2.6 on 
2022-06-10 22:24 + +import django.core.serializers.json +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0001_initial'), + ] + + operations = [ + migrations.AlterField( + model_name='event', + name='context', + field=models.JSONField(default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder), + ), + migrations.AlterField( + model_name='event', + name='environments', + field=models.JSONField(default=list, encoder=django.core.serializers.json.DjangoJSONEncoder), + ), + ] diff --git a/src/core/api/app/billing/migrations/0003_auto_20221010_1225.py b/src/core/api/app/billing/migrations/0003_auto_20221010_1225.py new file mode 100644 index 00000000..de6f3c0f --- /dev/null +++ b/src/core/api/app/billing/migrations/0003_auto_20221010_1225.py @@ -0,0 +1,47 @@ +# Generated by Django 3.2.6 on 2022-10-10 12:25 + +import billing.models.tally +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0003_user_slug'), + ('projects', '0067_auto_20220928_1611'), + ('billing', '0002_auto_20220610_2224'), + ] + + operations = [ + migrations.CreateModel( + name='Tally', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('name', models.SlugField()), + ('period', models.DurationField(default=billing.models.tally.default_tally_period)), + ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tallies', to='users.account')), + ('environment', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tallies', to='projects.environment')), + ('project', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='tallies', to='projects.project')), + ], + ), + migrations.CreateModel( + name='TallyMark', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('time', models.DateTimeField()), + ('amount', models.FloatField()), + ('tally', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='marks', to='billing.tally')), + ], + ), + migrations.AddConstraint( + model_name='tallymark', + constraint=models.UniqueConstraint(fields=('tally', 'time'), name='unique_tally_and_time'), + ), + migrations.AddConstraint( + model_name='tally', + constraint=models.UniqueConstraint(fields=('account', 'project', 'environment', 'name'), name='unique_scope_and_name'), + ), + ] diff --git a/src/core/api/app/billing/migrations/0004_auto_20221013_1819.py b/src/core/api/app/billing/migrations/0004_auto_20221013_1819.py new file mode 100644 index 00000000..75972a29 --- /dev/null +++ b/src/core/api/app/billing/migrations/0004_auto_20221013_1819.py @@ -0,0 +1,34 @@ +# Generated by Django 3.2.6 on 2022-10-13 18:19 + +import autoslug.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0003_auto_20221010_1225'), + ] + + operations = [ + migrations.CreateModel( + name='Plan', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('name', models.CharField(max_length=50)), 
+ ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True)), + ('billing_period', models.CharField(choices=[('monthly', 'Monthly'), ('yearly', 'Yearly')], default='monthly', max_length=20)), + ('prices', models.JSONField(blank=True, default=list, null=True)), + ('environment_quotas', models.JSONField(blank=True, default=dict, null=True)), + ], + options={ + 'abstract': False, + }, + ), + migrations.AlterModelOptions( + name='tally', + options={'verbose_name_plural': 'tallies'}, + ), + ] diff --git a/src/core/api/app/billing/migrations/0005_auto_20221019_1610.py b/src/core/api/app/billing/migrations/0005_auto_20221019_1610.py new file mode 100644 index 00000000..0aa111c2 --- /dev/null +++ b/src/core/api/app/billing/migrations/0005_auto_20221019_1610.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-10-19 16:10 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0004_auto_20221013_1819'), + ] + + operations = [ + migrations.AddField( + model_name='plan', + name='kind', + field=models.CharField(choices=[('starter', 'Starter'), ('growth', 'Growth')], default='starter', max_length=20), + ), + migrations.AlterField( + model_name='plan', + name='slug', + field=models.SlugField(unique=True), + ), + ] diff --git a/src/core/api/app/billing/migrations/0006_plan_trial_period_days.py b/src/core/api/app/billing/migrations/0006_plan_trial_period_days.py new file mode 100644 index 00000000..2abe4f54 --- /dev/null +++ b/src/core/api/app/billing/migrations/0006_plan_trial_period_days.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-10-19 16:17 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0005_auto_20221019_1610'), + ] + + operations = [ + migrations.AddField( + model_name='plan', + name='trial_period_days', + field=models.PositiveIntegerField(default=0, help_text='Zero means no trial period.'), + ), + ] diff --git a/src/core/api/app/billing/migrations/0007_auto_20221028_1456.py b/src/core/api/app/billing/migrations/0007_auto_20221028_1456.py new file mode 100644 index 00000000..81ac3323 --- /dev/null +++ b/src/core/api/app/billing/migrations/0007_auto_20221028_1456.py @@ -0,0 +1,37 @@ +# Generated by Django 3.2.6 on 2022-10-28 14:56 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0006_plan_trial_period_days'), + ] + + operations = [ + migrations.CreateModel( + name='Product', + fields=[ + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('id', models.CharField(max_length=40, primary_key=True, serialize=False)), + ('name', models.CharField(max_length=100)), + ('description', models.TextField()), + ('stripe_data', models.JSONField(default=dict)), + ('tally_name', models.SlugField()), + ], + options={ + 'abstract': False, + }, + ), + migrations.RemoveField( + model_name='plan', + name='prices', + ), + migrations.AddField( + model_name='plan', + name='items', + field=models.JSONField(default=list), + ), + ] diff --git a/src/core/api/app/billing/migrations/0008_auto_20221115_2138.py b/src/core/api/app/billing/migrations/0008_auto_20221115_2138.py new file mode 100644 index 00000000..bcef121f --- /dev/null +++ b/src/core/api/app/billing/migrations/0008_auto_20221115_2138.py @@ -0,0 +1,54 @@ +# Generated by Django 3.2.6 on 2022-11-15 21:38 + +import django.core.serializers.json 
+from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0007_auto_20221028_1456'), + ] + + operations = [ + migrations.AddField( + model_name='event', + name='error_details', + field=models.TextField(blank=True, help_text='Details about processing error if existed', null=True), + ), + migrations.AddField( + model_name='event', + name='status', + field=models.CharField(choices=[('pending', 'Pending'), ('processed', 'Processed successfully'), ('ignored', 'Processing not required'), ('failed', 'Failed processing')], default='pending', max_length=30), + ), + migrations.AddField( + model_name='event', + name='users', + field=models.JSONField(default=list, encoder=django.core.serializers.json.DjangoJSONEncoder, help_text='Users with access to at least one code-server pod'), + ), + migrations.AddField( + model_name='product', + name='charges_per_seat', + field=models.BooleanField(default=False, help_text='If charges per user with access to at least one code-server pod'), + ), + migrations.AddField( + model_name='product', + name='service_name', + field=models.CharField(blank=True, help_text='Used to associate services that need to be charged by instance', max_length=50, null=True), + ), + migrations.AlterField( + model_name='event', + name='event_type', + field=models.CharField(choices=[('groups_added', 'User added to new groups'), ('groups_deleted', 'User removed from groups'), ('permissions_added', 'New permissions added to group'), ('permissions_deleted', 'Permissions removed from group'), ('env_created', 'Environment created'), ('env_deleted', 'Environment deleted'), ('services_changed', 'Services changed')], max_length=30, null=True), + ), + migrations.AlterField( + model_name='product', + name='description', + field=models.TextField(blank=True, null=True), + ), + migrations.AlterField( + model_name='product', + name='tally_name', + field=models.SlugField(blank=True, help_text='The name of a tally that keeps track of the usage for this product', null=True), + ), + ] diff --git a/src/core/api/app/billing/migrations/0009_auto_20221116_2103.py b/src/core/api/app/billing/migrations/0009_auto_20221116_2103.py new file mode 100644 index 00000000..bee7151a --- /dev/null +++ b/src/core/api/app/billing/migrations/0009_auto_20221116_2103.py @@ -0,0 +1,28 @@ +# Generated by Django 3.2.6 on 2022-11-16 21:03 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0008_auto_20221115_2138'), + ] + + operations = [ + migrations.AddField( + model_name='tallymark', + name='error_details', + field=models.TextField(blank=True, help_text='Details about processing error if existed', null=True), + ), + migrations.AddField( + model_name='tallymark', + name='processed_at', + field=models.DateTimeField(blank=True, null=True), + ), + migrations.AddField( + model_name='tallymark', + name='status', + field=models.CharField(choices=[('pending', 'Pending'), ('processed', 'Processed successfully'), ('failed', 'Failed processing')], default='pending', max_length=30), + ), + ] diff --git a/src/core/api/app/billing/migrations/0010_auto_20221117_1340.py b/src/core/api/app/billing/migrations/0010_auto_20221117_1340.py new file mode 100644 index 00000000..8629bfa7 --- /dev/null +++ b/src/core/api/app/billing/migrations/0010_auto_20221117_1340.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-11-17 13:40 + +from django.db import migrations, models + + +class Migration(migrations.Migration): 
+ + dependencies = [ + ('billing', '0009_auto_20221116_2103'), + ] + + operations = [ + migrations.AlterField( + model_name='event', + name='status', + field=models.CharField(choices=[('P', 'Pending'), ('S', 'Processed successfully'), ('I', 'Processing not required'), ('F', 'Failed processing')], default='P', max_length=30), + ), + migrations.AlterField( + model_name='tallymark', + name='status', + field=models.CharField(choices=[('P', 'Pending'), ('S', 'Processed successfully'), ('I', 'Processing not required'), ('F', 'Failed processing')], default='P', max_length=30), + ), + ] diff --git a/src/core/api/app/billing/migrations/0011_alter_plan_kind.py b/src/core/api/app/billing/migrations/0011_alter_plan_kind.py new file mode 100644 index 00000000..f8bcf1cb --- /dev/null +++ b/src/core/api/app/billing/migrations/0011_alter_plan_kind.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-11-17 22:01 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0010_auto_20221117_1340'), + ] + + operations = [ + migrations.AlterField( + model_name='plan', + name='kind', + field=models.CharField(choices=[('starter', 'Starter'), ('growth', 'Growth'), ('custom', 'Custom')], default='starter', max_length=20), + ), + ] diff --git a/src/core/api/app/billing/migrations/0012_alter_plan_items.py b/src/core/api/app/billing/migrations/0012_alter_plan_items.py new file mode 100644 index 00000000..662868a7 --- /dev/null +++ b/src/core/api/app/billing/migrations/0012_alter_plan_items.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-11-17 22:15 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0011_alter_plan_kind'), + ] + + operations = [ + migrations.AlterField( + model_name='plan', + name='items', + field=models.JSONField(blank=True, default=list, null=True), + ), + ] diff --git a/src/core/api/app/billing/migrations/0013_plan_workers_execution_limit.py b/src/core/api/app/billing/migrations/0013_plan_workers_execution_limit.py new file mode 100644 index 00000000..e09ad1e1 --- /dev/null +++ b/src/core/api/app/billing/migrations/0013_plan_workers_execution_limit.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-11-15 18:49 + +import billing.models.plan +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0012_alter_plan_items'), + ] + + operations = [ + migrations.AddField( + model_name='plan', + name='workers_execution_limit', + field=models.JSONField(default=billing.models.plan.get_default_workers_execution_limit, null=True), + ), + ] diff --git a/src/core/api/app/billing/migrations/0014_auto_20221206_1805.py b/src/core/api/app/billing/migrations/0014_auto_20221206_1805.py new file mode 100644 index 00000000..bc7b6f26 --- /dev/null +++ b/src/core/api/app/billing/migrations/0014_auto_20221206_1805.py @@ -0,0 +1,26 @@ +# Generated by Django 3.2.6 on 2022-12-06 18:05 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('billing', '0013_plan_workers_execution_limit'), + ] + + operations = [ + migrations.AddField( + model_name='event', + name='approval_status', + field=models.CharField(choices=[('N', 'Not Required'), ('A', 'Approved'), ('P', 'Pending')], default='P', max_length=1), + ), + 
migrations.AddField( + model_name='event', + name='approved_by', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL), + ), + ] diff --git a/src/core/api/app/billing/migrations/0015_auto_20230303_1412.py b/src/core/api/app/billing/migrations/0015_auto_20230303_1412.py new file mode 100644 index 00000000..542bf664 --- /dev/null +++ b/src/core/api/app/billing/migrations/0015_auto_20230303_1412.py @@ -0,0 +1,24 @@ +# Generated by Django 3.2.16 on 2023-03-03 14:12 + +import billing.models.plan +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0014_auto_20221206_1805'), + ] + + operations = [ + migrations.AddField( + model_name='plan', + name='developer_licenses', + field=models.PositiveIntegerField(default=0, help_text='Max number of developer licenses (users with access to at least one code-server pod), zero means infinite.'), + ), + migrations.AlterField( + model_name='plan', + name='workers_execution_limit', + field=models.JSONField(default=billing.models.plan.get_default_workers_execution_limit, help_text='max execution seconds allowed per period at plan level', null=True), + ), + ] diff --git a/src/core/api/app/billing/migrations/0016_rename_items_plan_variants.py b/src/core/api/app/billing/migrations/0016_rename_items_plan_variants.py new file mode 100644 index 00000000..6e36f7e0 --- /dev/null +++ b/src/core/api/app/billing/migrations/0016_rename_items_plan_variants.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2023-11-15 22:12 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0015_auto_20230303_1412'), + ] + + operations = [ + migrations.RenameField( + model_name='plan', + old_name='items', + new_name='variants', + ), + ] diff --git a/src/core/api/app/billing/migrations/0017_auto_20240701_2118.py b/src/core/api/app/billing/migrations/0017_auto_20240701_2118.py new file mode 100644 index 00000000..3cd801a4 --- /dev/null +++ b/src/core/api/app/billing/migrations/0017_auto_20240701_2118.py @@ -0,0 +1,90 @@ +# Generated by Django 3.2.20 on 2024-07-01 21:18 + +import billing.models.tally +import django.core.serializers.json +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0016_rename_items_plan_variants'), + ] + + operations = [ + migrations.AlterField( + model_name='event', + name='context', + field=models.JSONField(default=dict, encoder=django.core.serializers.json.DjangoJSONEncoder, help_text="This is a JSON dictionary which is provided when an event is created. The event_type determines what may be in here; for ENV_DELETED or SERVICES_CHANGED, this will at least have an 'id' key with the environment ID impacted by the change."), + ), + migrations.AlterField( + model_name='event', + name='users', + field=models.JSONField(default=list, encoder=django.core.serializers.json.DjangoJSONEncoder, help_text='Users with access to at least one code-server pod. 
This is a JSON list of user slugs.'), + ), + migrations.AlterField( + model_name='plan', + name='billing_period', + field=models.CharField(choices=[('monthly', 'Monthly'), ('yearly', 'Yearly')], default='monthly', help_text='The frequency of billing', max_length=20), + ), + migrations.AlterField( + model_name='plan', + name='environment_quotas', + field=models.JSONField(blank=True, default=dict, help_text='The default environment quota -- this can be overriden per environment. The format is a Kubernetes resource quota: https://kubernetes.io/docs/concepts/policy/resource-quotas/', null=True), + ), + migrations.AlterField( + model_name='plan', + name='kind', + field=models.CharField(choices=[('starter', 'Starter'), ('growth', 'Growth'), ('custom', 'Custom')], default='starter', help_text='The type of plan', max_length=20), + ), + migrations.AlterField( + model_name='plan', + name='name', + field=models.CharField(help_text='Human readable name. If left blank, this will be set based on kind and period.', max_length=50), + ), + migrations.AlterField( + model_name='plan', + name='slug', + field=models.SlugField(help_text='starter-monthly, starter-yearly, etc. If left blank, this will be automatically set based on kind and period.', unique=True), + ), + migrations.AlterField( + model_name='plan', + name='variants', + field=models.JSONField(blank=True, default=list, help_text='JSON list of Stripe subscription items to allow multiple prices for a given plan.', null=True), + ), + migrations.AlterField( + model_name='product', + name='description', + field=models.TextField(blank=True, help_text='Additional description if needed.', null=True), + ), + migrations.AlterField( + model_name='product', + name='id', + field=models.CharField(help_text='This should match the corresponding Stripe product ID; it will start with prod_', max_length=40, primary_key=True, serialize=False), + ), + migrations.AlterField( + model_name='product', + name='name', + field=models.CharField(help_text='Human readable, descriptive name of product', max_length=100), + ), + migrations.AlterField( + model_name='product', + name='service_name', + field=models.CharField(blank=True, help_text="Used to associate services that need to be charged by instance. This may be set for you if left blank and we can infer it from the 'name'.", max_length=50, null=True), + ), + migrations.AlterField( + model_name='product', + name='stripe_data', + field=models.JSONField(default=dict, help_text='JSON dictionary which is a representation of the Stripe object (i.e. what Stripe uses in its API)'), + ), + migrations.AlterField( + model_name='product', + name='tally_name', + field=models.SlugField(blank=True, help_text="The name of a tally that keeps track of the usage for this product. See :model:`billing.Tally` This may be set for you if left blank and 'name' contains 'airbyte' or 'airflow' in addition to the word 'compute'", null=True), + ), + migrations.AlterField( + model_name='tally', + name='period', + field=models.DurationField(default=billing.models.tally.default_tally_period, help_text='Time delta between tally marks. The smaller this delta, the more sensitive we are to billing changes. 
See :model:`billing.TallyMark`'), + ), + ] diff --git a/src/core/api/app/billing/migrations/0018_credit.py b/src/core/api/app/billing/migrations/0018_credit.py new file mode 100644 index 00000000..f3732b8c --- /dev/null +++ b/src/core/api/app/billing/migrations/0018_credit.py @@ -0,0 +1,88 @@ +# Generated by Django 5.0.7 on 2024-11-26 15:21 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("billing", "0017_auto_20240701_2118"), + ("users", "0022_user_setup_enabled"), + ] + + operations = [ + migrations.CreateModel( + name="Credit", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ( + "valid_from", + models.DateField( + help_text="Date when credit starts being applicable" + ), + ), + ( + "valid_until", + models.DateField( + help_text="Date when credit stops being applicable" + ), + ), + ( + "reference", + models.CharField( + help_text="Descriptive credit reference", max_length=250 + ), + ), + ( + "developer_seats", + models.PositiveIntegerField( + default=0, help_text="Developer seats credit" + ), + ), + ( + "airflow_instances", + models.PositiveIntegerField( + default=0, help_text="Airflow instances credit" + ), + ), + ( + "airbyte_instances", + models.PositiveIntegerField( + default=0, help_text="Airbyte instances credit" + ), + ), + ( + "superset_instances", + models.PositiveIntegerField( + default=0, help_text="Superset instances credit" + ), + ), + ( + "datahub_instances", + models.PositiveIntegerField( + default=0, help_text="Datahub instances credit" + ), + ), + ( + "account", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="users.account" + ), + ), + ], + options={ + "abstract": False, + }, + ), + ] diff --git a/src/core/api/app/billing/migrations/__init__.py b/src/core/api/app/billing/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/billing/models/__init__.py b/src/core/api/app/billing/models/__init__.py new file mode 100644 index 00000000..f1406c8f --- /dev/null +++ b/src/core/api/app/billing/models/__init__.py @@ -0,0 +1,5 @@ +from .credit import * # noqa: F401,F403 +from .event import * # noqa: F401,F403 +from .plan import * # noqa: F401,F403 +from .product import * # noqa: F401,F403 +from .tally import * # noqa: F401,F403 diff --git a/src/core/api/app/billing/models/credit.py b/src/core/api/app/billing/models/credit.py new file mode 100644 index 00000000..c84c8aed --- /dev/null +++ b/src/core/api/app/billing/models/credit.py @@ -0,0 +1,83 @@ +from datetime import date + +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.core.exceptions import ValidationError +from django.db import models + + +class CreditManager(models.Manager): + """The Credit Manager provides the get_credit function used by billing manager""" + + def get_credit(self, account): + """Returns a credit record if exists for the given account and valid today""" + return self.filter( + account=account, valid_from__lte=date.today(), valid_until__gte=date.today() + ).first() + + +class Credit(AuditModelMixin, DatacovesModel): + """Credits represent the prepayments accounts made so certain + products are not included on a subscription during the validity period + """ + + account = 
models.ForeignKey("users.Account", on_delete=models.CASCADE) + valid_from = models.DateField(help_text="Date when credit starts being applicable") + valid_until = models.DateField(help_text="Date when credit stops being applicable") + reference = models.CharField( + max_length=250, help_text="Descriptive credit reference" + ) + developer_seats = models.PositiveIntegerField( + help_text="Developer seats credit", default=0 + ) + airflow_instances = models.PositiveIntegerField( + help_text="Airflow instances credit", default=0 + ) + airbyte_instances = models.PositiveIntegerField( + help_text="Airbyte instances credit", default=0 + ) + superset_instances = models.PositiveIntegerField( + help_text="Superset instances credit", default=0 + ) + datahub_instances = models.PositiveIntegerField( + help_text="Datahub instances credit", default=0 + ) + + objects = CreditManager() + + def __str__(self): + return f"{self.account} - {self.reference}" + + def clean(self): + """Enforce the integrity of credit; throws ValidationError + if validation periods collide with an existing one.""" + + if self.valid_from >= self.valid_until: + raise ValidationError("Credit validity period is invalid") + + if ( + self.developer_seats == 0 + and self.airflow_instances == 0 + and self.airbyte_instances == 0 + and self.superset_instances == 0 + and self.datahub_instances == 0 + ): + raise ValidationError("No credits specified") + + # Check for overlapping periods with existing credits + overlapping = Credit.objects.filter( + account=self.account, + valid_from__lt=self.valid_until, + valid_until__gt=self.valid_from, + ).exclude(pk=self.pk) + + if overlapping.exists(): + raise ValidationError( + "Credit period overlaps with an existing credit for this account" + ) + + def save(self, *args, **kwargs): + """Override save to enforce the clean() process above""" + + self.clean() + super().save(*args, **kwargs) diff --git a/src/core/api/app/billing/models/event.py b/src/core/api/app/billing/models/event.py new file mode 100644 index 00000000..3c548361 --- /dev/null +++ b/src/core/api/app/billing/models/event.py @@ -0,0 +1,386 @@ +from billing.slack import report_event_to_slack +from core.models import DatacovesModel +from django.conf import settings +from django.core.serializers.json import DjangoJSONEncoder +from django.db import models +from django.db.models import Q +from django.forms import ValidationError +from projects.models import Environment +from users.models import Account, User + +from lib.utils import same_dicts_in_lists + +# from .tasks import task_inform_event_to_stripe + + +class EventManager(models.Manager): + """This manager provides the 'track' method, which is kind of like + a create-if-needed method. It will tally up all the potential items + which can impact billing -- user count, environment count, and service + usage. It also takes into account if the account has the + 'approve_billing_events' flag or not. + + If the end result is a billing change needs to happen due to the + provided event, we will create an Event object. Otherwise, this + will do nothing. + """ + + def track(self, account: Account, event_type: str, context: dict = {}): + """event_type will be one of the event type constants on Event. + context is stored on the newly created Event. What context contains + varies based on type. + + For ENV_DELETED or SERVICES_CHANGED, context should have an 'id' + field which is the environment ID impacted. 
+ """ + + def get_services_user_count(env): + services = {} + # FIXME: Convert this to a single query + for service in settings.SERVICES: + user_count = ( + User.objects.filter( + Q( + groups__permissions__name__startswith=( + f"{env.project.account.slug}:" + f"{env.project.slug}|" + f"workbench:{service}" + ) + ) + | Q( + groups__permissions__name__startswith=( + f"{env.project.account.slug}:" + f"{env.project.slug}:" + f"{env.slug}|" + f"workbench:{service}" + ) + ) + ) + .filter(is_superuser=False) + .distinct() + .count() + ) + services[service] = user_count + return services + + # If account is not subscribed to a paid plan, no events are created + if not account.is_subscribed: + return + + environments = [] + cluster = None + for env in Environment.objects.filter(project__account=account).all(): + # This check is needed as delete signal is captured before env is actually deleted + if event_type == Event.ENV_DELETED and env.id == context["id"]: + continue + new_env = { + "id": env.id, + "service_users": get_services_user_count(env), + } + # This is necessary because signal is triggered on model pre_save + if event_type == Event.SERVICES_CHANGED and env.id == context["id"]: + new_env["services_enabled"] = context["services"] + else: + new_env["services_enabled"] = list(env.enabled_and_valid_services()) + # used to get domain later on slack notification, to be improved + cluster = env.cluster + environments.append(new_env) + + if not environments: + return + + users = ( + User.objects.only("slug") + .filter(groups__permissions__name__contains=f"{account.slug}:") + .filter( + groups__permissions__name__contains=f"workbench:{settings.SERVICE_CODE_SERVER}|", + ) + .filter(is_superuser=False) + .distinct() + ) + + if ( + account.plan.kind in [account.plan.KIND_STARTER, account.plan.KIND_GROWTH] + and account.approve_billing_events + ): + approval_status = "P" + else: + approval_status = "N" + event = self._create_event_if_env_modified( + account=account, + event_type=event_type, + context=context, + environments=environments, + users=[user.slug for user in users], + approval_status=approval_status, + ) + + if event and approval_status == "P": + report_event_to_slack(event, cluster) + + def _create_event_if_env_modified( + self, + account: Account, + event_type: str, + context: dict, + users: list, + environments: list, + approval_status: str, + ) -> any: + # Retrieve last event to compare environments and users. + last_event = self.filter(account=account).order_by("-created_at").first() + # True if changes in environments or users. False if no changes or first event. + environments_changed = last_event and ( + not same_dicts_in_lists(last_event.environments, environments) + or len(last_event.users) != len(users) + ) + + # The first event is triggered when the environment is created. + # If no users were added yet, ignore the event to avoid modifying + # the subscription with quantity = 0. + # There will be another event with the right users. + if not last_event and users or environments_changed: + event = self.create( + account=account, + event_type=event_type, + context=context, + environments=environments, + users=users, + approval_status=approval_status, + ) + return event + return None + + +class Event(DatacovesModel): + """Event object for events which impact billing + + Certain actions taken by our customers can result in events which impact + billing. We will need to update their billing based on these changes. 
+ + Customers may be set up to require approval for billing related changes + as well; this provides the list of items to approve. + + ========= + Constants + ========= + + ----------- + Event Types + ----------- + + - GROUPS_ADDED + - GROUPS_DELETED + - PERMISSIONS_ADDED + - PERMISSIONS_DELETED + - ENV_CREATED + - ENV_DELETED + - SERVICES_CHANGED + - EVENT_TYPES - a tuple of tuple pairs for select box display + + -------- + Statuses + -------- + + - STATUS_PENDING - Needs to be processed + - STATUS_PROCESSED - Set when Stripe pricing is updated + - STATUS_IGNORED - Will not impact billing + - STATUS_FAILED - Failed to update Stripe + - STATUS_TYPES - a tuple of tuple pairs for select box display + + -------- + Approval + -------- + + - APPROVAL_NOT_REQUIRED + - APPROVAL_APPROVED + - APPROVAL_PENDING + - APPROVAL_STATUS - a tuple of tuple pairs for select box display. + + Note that a decline will simply delete the event. + + ======= + Methods + ======= + + - approve(approver) - Will approve the event. 'approver' is a User. + + Approvals must happen in order, oldest approval first. Raises + ValidationError if done out of order. + """ + + GROUPS_ADDED = "groups_added" + GROUPS_DELETED = "groups_deleted" + PERMISSIONS_ADDED = "permissions_added" + PERMISSIONS_DELETED = "permissions_deleted" + ENV_CREATED = "env_created" + ENV_DELETED = "env_deleted" + SERVICES_CHANGED = "services_changed" + EVENT_TYPES = ( + ( + GROUPS_ADDED, + "User added to new groups", + ), + ( + GROUPS_DELETED, + "User removed from groups", + ), + ( + PERMISSIONS_ADDED, + "New permissions added to group", + ), + ( + PERMISSIONS_DELETED, + "Permissions removed from group", + ), + ( + ENV_CREATED, + "Environment created", + ), + ( + ENV_DELETED, + "Environment deleted", + ), + ( + SERVICES_CHANGED, + "Services changed", + ), + ) + + STATUS_PENDING = "P" + STATUS_PROCESSED = "S" + STATUS_IGNORED = "I" + STATUS_FAILED = "F" + STATUS_TYPES = ( + ( + STATUS_PENDING, + "Pending", + ), + ( + STATUS_PROCESSED, + "Processed successfully", + ), + ( + STATUS_IGNORED, + "Processing not required", + ), + ( + STATUS_FAILED, + "Failed processing", + ), + ) + + APPROVAL_NOT_REQUIRED = "N" + APPROVAL_APPROVED = "A" + APPROVAL_PENDING = "P" + APPROVAL_STATUS = ( + ( + APPROVAL_NOT_REQUIRED, + "Not Required", + ), + ( + APPROVAL_APPROVED, + "Approved", + ), + ( + APPROVAL_PENDING, + "Pending", + ), + ) + + created_at = models.DateTimeField(auto_now_add=True, editable=False) + account = models.ForeignKey(Account, on_delete=models.CASCADE) + event_type = models.CharField(max_length=30, choices=EVENT_TYPES, null=True) + context = models.JSONField( + default=dict, + encoder=DjangoJSONEncoder, + help_text="This is a JSON dictionary which is provided when an event " + "is created. The event_type determines what may be in here; for " + "ENV_DELETED or SERVICES_CHANGED, this will at least have an 'id' " + "key with the environment ID impacted by the change.", + ) + environments = models.JSONField(default=list, encoder=DjangoJSONEncoder) + users = models.JSONField( + default=list, + encoder=DjangoJSONEncoder, + help_text="Users with access to at least one code-server pod. 
This is a JSON list of user slugs.", + ) + approval_status = models.CharField( + max_length=1, choices=APPROVAL_STATUS, default=APPROVAL_PENDING + ) + approved_by = models.ForeignKey( + User, on_delete=models.SET_NULL, null=True, blank=True + ) + status = models.CharField( + max_length=30, choices=STATUS_TYPES, default=STATUS_PENDING + ) + processed_at = models.DateTimeField(null=True, blank=True) + error_details = models.TextField( + null=True, blank=True, help_text="Details about processing error if existed" + ) + + objects = EventManager() + + def __str__(self): + return f"{self.account} {self.event_type} {self.created_at}" + + @property + def service_counts(self): + """Produce a count of enabled services in the account, totalling them + up across all environments.""" + + services = {service: 0 for service in settings.INSTANCE_SERVICES} + for env in self.environments: + for service_name in services.keys(): + if service_name in env["services_enabled"]: + services[service_name] += 1 + return services + + def approve(self, approver): + """Will approve the event. 'approver' is a User. + + Approvals must happen in order, oldest approval first. Raises + ValidationError if done out of order. + """ + + if self.approval_status == self.APPROVAL_PENDING: + previous_pending = Event.objects.filter( + id__lt=self.id, + approval_status=self.APPROVAL_PENDING, + account=self.account, + ).values_list("id", flat=True) + if previous_pending: + raise ValidationError( + f"There are older events you need to approve/ignore first, ids: {list(previous_pending)}" + ) + + self.approval_status = self.APPROVAL_APPROVED + self.approved_by = approver + self.save() + else: + raise ValidationError("Event was already approved") + + def ignore(self, approver): + """Will ignore the event. 'approver' is a User. + + Approvals must happen in order, oldest approval first. Raises + ValidationError if done out of order. + """ + + if self.approval_status == self.APPROVAL_PENDING: + previous_pending = Event.objects.filter( + id__lt=self.id, + approval_status=self.APPROVAL_PENDING, + account=self.account, + ).values_list("id", flat=True) + if previous_pending: + raise ValidationError( + f"There are older events you need to approve/ignore first, ids: {list(previous_pending)}" + ) + + self.approval_status = self.APPROVAL_APPROVED + self.approved_by = approver + self.status = self.STATUS_IGNORED + self.save() + else: + raise ValidationError("Event was already approved") diff --git a/src/core/api/app/billing/models/plan.py b/src/core/api/app/billing/models/plan.py new file mode 100644 index 00000000..12d92e05 --- /dev/null +++ b/src/core/api/app/billing/models/plan.py @@ -0,0 +1,312 @@ +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.db import models + +from .product import Product + + +def get_default_workers_execution_limit(): + return {"airflow": 36000, "airbyte": 36000} + + +class Plan(AuditModelMixin, DatacovesModel): + """ + A Plan is describes a set of products and services provided to Accounts, + and how they will pay for them (how often, at what prices, etc.). + + E.g.: Starter Monthly, Starter yearly, Growth Monthly, Growth Yearly. + A Plan is a stripe Subscription modulo the Account. + + ========= + Constants + ========= + + ---- + Type + ---- + + - KIND_STARTER + - KIND_GROWTH + - KIND_CUSTOM + - KINDS - a tuple of tuple pairs for select box. 
+ + ------ + Period + ------ + + - PERIOD_MONTHLY + - PERIOD_YEARLY + - PERIODS - a tuple of tuple pairs for select box + + + ======= + Methods + ======= + + - **save** is overriden to make defaults for slug and name (see table below) + - **variant_items(variant)** - Unpacks the Stripe items from the + 'variants' list as a list of dictionaries for a given variant name. + - **prices(variant)** - Return a dictionary of price ID's mapping to price + for a given variant name. + - **checkout_items(variant)** - Returns a list of price entries for + Stripe checkout based on what services we can bill for the given variant + - **products(variant)** - Returns a queryset of :model:`billing.Product` + objects associated with a given variant. + - **seat_price(variant)** - Returns a dictionary mapping Stripe price ID + to price amount for associated charge-by-seat products, or None if there + are no such products as part of this plan. + - **service_price(service, variant)** - Returns a dictionary mapping + Stripe price ID to price amount for associated flat-fee service products, or + None if there are no such products as part of this plan. + - **tally_price(tally, variant)** - Returns a dictionary mapping Stripe + price ID to price amount for associated tally-based (i.e utilization + based) products, or None if there are no such products as part of this + plan. + - **tally_service_price(tally, variant)** - Returns a dictionary mapping + Stripe price ID to price amount for associated tally-based + (i.e utilization based) products that are associated with a service, + or None if there are no such products as part of this plan. + - **get_metered_price_by_service(service_name, variant)** - + Metered products have 'service_name' set to the service name + - **informs_usage(variant)** - If contains metered prices, returns true + """ + + slug = models.SlugField( + unique=True, + help_text="starter-monthly, starter-yearly, etc. If left blank, " + "this will be automatically set based on kind and period.", + ) + + name = models.CharField( + max_length=50, + help_text="Human readable name. If left blank, this will be set based on kind and period.", + ) + + KIND_STARTER = "starter" + KIND_GROWTH = "growth" + KIND_CUSTOM = "custom" + KINDS = ( + (KIND_STARTER, "Starter"), + (KIND_GROWTH, "Growth"), + (KIND_CUSTOM, "Custom"), + ) + kind = models.CharField( + max_length=20, choices=KINDS, default=KIND_STARTER, help_text="The type of plan" + ) + + PERIOD_MONTHLY = "monthly" + PERIOD_YEARLY = "yearly" + PERIODS = ( + (PERIOD_MONTHLY, "Monthly"), + (PERIOD_YEARLY, "Yearly"), + ) + billing_period = models.CharField( + max_length=20, + choices=PERIODS, + default=PERIOD_MONTHLY, + help_text="The frequency of billing", + ) + + trial_period_days = models.PositiveIntegerField( + default=0, help_text="Zero means no trial period." + ) + + # variants : {"standard": [{"price": price}, ...]} + # There could be multiple prices with different amounts for the same plan so + # that the commercial team has flexibility to negotiate prices. + variants = models.JSONField( + default=list, + null=True, + blank=True, + help_text="JSON list of Stripe subscription items to allow multiple prices for a given plan.", + ) + + environment_quotas = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="The default environment quota -- this can be overriden " + "per environment. 
The format is a Kubernetes resource quota: " + "https://kubernetes.io/docs/concepts/policy/resource-quotas/", + ) + + workers_execution_limit = models.JSONField( + default=get_default_workers_execution_limit, + null=True, + help_text="max execution seconds allowed per period at plan level", + ) + developer_licenses = models.PositiveIntegerField( + default=0, + help_text="Max number of developer licenses (users with access to at least " + "one code-server pod), zero means infinite.", + ) + + def __str__(self): + return self.name + + def save(self, *args, **kwargs): + if not self.slug: + self.slug = f"{self.kind}-{self.billing_period}" + + if not self.name: + kind_nice = self.kind.replace("-", " ").capitalize() + billing_period_nice = self.billing_period.capitalize() + self.name = f"{kind_nice} - {billing_period_nice}" + + super().save(*args, **kwargs) + + def variant_items(self, variant: str) -> list: + """Unpacks the Stripe items from the 'variants' list as a list of + dictionaries for a given variant name. + """ + + return [ + list(v.values())[0]["items"] for v in self.variants if variant in v.keys() + ][0] + + def prices(self, variant: str) -> dict: + """Return a dictionary of price ID's mapping to price for a given + variant name. + """ + + return { + item["price"]["id"]: item["price"] for item in self.variant_items(variant) + } + + @property + def is_starter(self) -> bool: + """Returns boolean, True if plan is a starter plan""" + + return self.kind == self.KIND_STARTER + + @property + def is_monthly(self) -> bool: + """Returns boolean, True if plan is a monthly plan""" + + return self.billing_period == self.PERIOD_MONTHLY + + def checkout_items(self, variant: str) -> list: + """Returns a list of price entries of type "licensed" for a Stripe checkout. + It double checks that the products are not associated to services nor tallies, + so it effectively returns just developer seat prices""" + + noservice_products = Product.objects.filter( + service_name="", tally_name="" + ).values_list("id", flat=True) + + # Note that quantity is always 1 since during checkout only one developer has been + # created, the rest is created later and events should be informed accordingly + return [ + {"price": item["price"]["id"], "quantity": 1} + for item in self.variant_items(variant) + if item["price"].get("recurring", {}).get("usage_type", "") == "licensed" + and item["price"]["product"] in noservice_products + ] + + def products(self, variant: str): + """Returns a queryset of :model:`billing.Product` objects associated + with a given variant.""" + + product_ids = [ + price["product"] for price in self.prices(variant=variant).values() + ] + return Product.objects.filter(id__in=product_ids) + + def seat_price(self, variant: str): + """Returns a dictionary mapping Stripe price ID to price amount + for associated charge-by-seat products, or None if there are no + such products as part of this plan. + """ + + seat_products = Product.objects.filter(charges_per_seat=True).values_list( + "id", flat=True + ) + + for price in self.prices(variant=variant).values(): + if price["product"] in seat_products: + return price + + return None + + def service_price(self, service: str, variant: str): + """Returns a dictionary mapping Stripe price ID to price amount + for associated flat-fee service products, or None if there are no + such products as part of this plan. 
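The price-lookup helpers above (variant_items, prices, checkout_items, seat_price, service_price) all unpack the 'variants' JSON. A sketch of the shape those methods expect, inferred from variant_items() and prices(); the price and product IDs are hypothetical and 'plan' is assumed to be a Plan instance:

    # --- illustrative example, not part of the diff ---
    plan.variants = [
        {
            "standard": {
                "items": [
                    {
                        "price": {
                            "id": "price_dev_seat",       # hypothetical Stripe price ID
                            "product": "prod_dev_seat",   # hypothetical Stripe product ID
                            "unit_amount": 5000,
                            "recurring": {"usage_type": "licensed"},
                        }
                    },
                ]
            }
        }
    ]
    plan.variant_items("standard")  # -> the "items" list above
    plan.prices("standard")         # -> {"price_dev_seat": {...}}
    # --- end example ---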
+ """ + + service_products = ( + Product.objects.filter(service_name=service) + .filter(tally_name="") + .values_list("id", flat=True) + ) + + for price in self.prices(variant=variant).values(): + if price["product"] in service_products: + return price + + return None + + def tally_price(self, tally: str, variant: str): + """Returns a dictionary mapping Stripe price ID to price amount + for associated tally-based (i.e utilization based) products, + or None if there are no such products as part of this plan. + """ + + tally_products = Product.objects.filter(tally_name=tally).values_list( + "id", flat=True + ) + for price in self.prices(variant=variant).values(): + if price["product"] in tally_products: + return price + return None + + def tally_service_price(self, tally: str, variant: str): + """Returns a dictionary mapping Stripe price ID to price amount + for associated tally-based (i.e utilization based) products that + are associated with a service, or None if there are no such products + as part of this plan. + """ + + tally_product = Product.objects.filter(tally_name=tally).first() + service_name = None + + if tally_product: + service_name = tally_product.service_name + + service_product = ( + Product.objects.filter(service_name=service_name) + .exclude(tally_name=tally_product.name) + .values_list("id", flat=True) + ) + + for price in self.prices(variant=variant).values(): + if price["product"] in service_product: + return price + + return None + + def get_metered_price_by_service(self, service_name: str, variant: str): + """Metered products have 'service_name' set to the service name""" + metered_product = ( + Product.objects.filter(service_name=service_name) + .exclude(tally_name="") + .first() + ) + assert ( + metered_product is not None + ), f"No metered billing found in Products with service name {service_name}" + prices = [ + price + for price in self.prices(variant=variant).values() + if price["product"] == metered_product.id + ] + if prices: + return prices[0] + return None + + def informs_usage(self, variant: str) -> bool: + """If contains metered prices, returns true""" + for item in self.prices(variant).values(): + if item.get("recurring", {}).get("usage_type", "") == "metered": + return True + return False diff --git a/src/core/api/app/billing/models/product.py b/src/core/api/app/billing/models/product.py new file mode 100644 index 00000000..5b4a8ee8 --- /dev/null +++ b/src/core/api/app/billing/models/product.py @@ -0,0 +1,101 @@ +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.db import models + +from .tally import Tally + + +class Product(AuditModelMixin, DatacovesModel): + """Products are our local representation of Stripe products + + This is the mapping between our database's understanding of products + and Stripe's, essentially a mapping to stripe product IDs. We + use their product ID as the primary key here (id). + + This is managed by a combination of products set up in stripe, and + cli.py's download_pricing_model method. This, in turn, is driven by + the 'pricing.yaml' file which is in the cluster config. + + Note that 'save' is overriden on this Model in order to implement the + inference of service_name and tally_name. + """ + + # id, name, description and stripe_data mirror stripe. 
+ id = models.CharField( + max_length=40, + primary_key=True, + help_text="This should match the corresponding Stripe product ID; it will start with prod_", + ) + name = models.CharField( + max_length=100, help_text="Human readable, descriptive name of product" + ) + description = models.TextField( + null=True, blank=True, help_text="Additional description if needed." + ) + stripe_data = models.JSONField( + default=dict, + help_text="JSON dictionary which is a representation of the Stripe object (i.e. what Stripe uses in its API)", + ) + charges_per_seat = models.BooleanField( + default=False, + help_text="If charges per user with access to at least one code-server pod", + ) + service_name = models.CharField( + max_length=50, + null=True, + blank=True, + help_text="Used to associate services that need to be charged by " + "instance. This may be set for you if left blank and we can infer " + "it from the 'name'.", + ) + tally_name = models.SlugField( + null=True, + blank=True, + help_text="The name of a tally that keeps track of the usage for " + "this product. See :model:`billing.Tally` This may be set for you " + "if left blank and 'name' contains 'airbyte' or 'airflow' in " + "addition to the word 'compute'", + ) + + def tallies(self): + """Tallies associated with this Product. Only works if tally_name is + set.""" + + return Tally.objects.filter(name=self.tally_name) + + def __str__(self): + return f"{self.id} ({self.name})" + + def save(self, *args, **kwargs): + """Sets 'service_name' and 'tally_name' if they are not already set + and we can infer the values. + + 'service_name' will be set if 'server' or 'compute' are in the + 'name' field. It will be set to whatever service matches first + in the settings.SERVICES field. + + 'tally_name' will be set if 'compute' is in the 'name' field and + if 'airflow' or 'airbyte' is in the field. 'airbyte' will win if, + for some reason, both words are present. + """ + + # Looking for a service using the product name + lower_name = self.name.lower() + + if not self.service_name and ( + "server" in lower_name or "compute" in lower_name + ): + for service in settings.SERVICES: + if service.lower() in lower_name: + self.service_name = service + break + + # Connecting product with actual tallies + if not self.tally_name: + if "airflow" in lower_name and "compute" in lower_name: + self.tally_name = settings.TALLY_AIRFLOW_WORKERS_NAME + if "airbyte" in lower_name and "compute" in lower_name: + self.tally_name = settings.TALLY_AIRBYTE_WORKERS_NAME + + super().save(*args, **kwargs) diff --git a/src/core/api/app/billing/models/tally.py b/src/core/api/app/billing/models/tally.py new file mode 100644 index 00000000..31b371e5 --- /dev/null +++ b/src/core/api/app/billing/models/tally.py @@ -0,0 +1,127 @@ +from datetime import timedelta + +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.db import models +from projects.models import Environment, Project +from users.models import Account + + +def default_tally_period(): + return timedelta(days=1) + + +class Tally(AuditModelMixin, DatacovesModel): + """ + Tally records billable resources usage. + """ + + # Tally scope can be per account, per project or per environment. 
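A sketch of how a daily usage mark is recorded against an environment-scoped tally, mirroring the update_or_create calls in billing/tasks.py later in this diff ('env' and 'day' are assumed to exist; the tally name follows the example given for the name field below):

    # --- illustrative example, not part of the diff ---
    from datetime import timedelta
    from billing.models import Tally, TallyMark

    tally, _ = Tally.objects.update_or_create(
        account=env.project.account,
        project=env.project,
        environment=env,
        name="airflow_workers_daily_running_time_seconds",
        defaults={"period": timedelta(days=1)},
    )
    # One mark per (tally, time), enforced by the unique_tally_and_time constraint.
    TallyMark.objects.update_or_create(tally=tally, time=day, defaults={"amount": 3600.0})
    # --- end example ---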
+ account = models.ForeignKey( + Account, on_delete=models.CASCADE, related_name="tallies" + ) + project = models.ForeignKey( + Project, null=True, on_delete=models.CASCADE, related_name="tallies" + ) + environment = models.ForeignKey( + Environment, null=True, on_delete=models.CASCADE, related_name="tallies" + ) + + # The tally's name. Should make clear what the resource being tracked is and + # in which units. E.g. airflow_workers_daily_running_time_seconds. + name = models.SlugField() + + # Time delta between tally marks. + period = models.DurationField( + default=default_tally_period, + help_text="Time delta between tally marks. The smaller this delta, " + "the more sensitive we are to billing changes. See " + ":model:`billing.TallyMark`", + ) + + class Meta: + verbose_name_plural = "tallies" + constraints = [ + models.UniqueConstraint( + fields=["account", "project", "environment", "name"], + name="unique_scope_and_name", + ), + ] + + def __str__(self): + return f"{self.environment.slug}-{self.name}" + + +class TallyMark(DatacovesModel): + """Keep track of utilization at a point in time + + A TallyMark "tm" records a resource usage amount for the period between + "tm.time" and "tm.time + tm.tally.period". + + ========= + Constants + ========= + + - STATUS_PENDING - has not been accounted for yet + - STATUS_PROCESSED - has been applied to Stripe billing + - STATUS_IGNORED - will not be billed + - STATUS_FAILED - failed while doing Stripe processing + - STATUS_TYPES - tuple of tuple pairs, for select box display. + """ + + STATUS_PENDING = "P" + STATUS_PROCESSED = "S" + STATUS_IGNORED = "I" + STATUS_FAILED = "F" + STATUS_TYPES = ( + ( + STATUS_PENDING, + "Pending", + ), + ( + STATUS_PROCESSED, + "Processed successfully", + ), + ( + STATUS_IGNORED, + "Processing not required", + ), + ( + STATUS_FAILED, + "Failed processing", + ), + ) + + tally = models.ForeignKey(Tally, on_delete=models.CASCADE, related_name="marks") + time = models.DateTimeField() + amount = models.FloatField() + status = models.CharField( + max_length=30, choices=STATUS_TYPES, default=STATUS_PENDING + ) + processed_at = models.DateTimeField(null=True, blank=True) + error_details = models.TextField( + null=True, blank=True, help_text="Details about processing error if existed" + ) + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["tally", "time"], + name="unique_tally_and_time", + ), + ] + + def __str__(self): + return f"{self.tally}:{self.time}" + + @property + def account(self): + """The account associated with this tally mark, by way of the tally""" + return self.tally.account + + @property + def environment(self): + """The environment associated with this tally mark, by way of the + tally. This can be None.""" + + return self.tally.environment diff --git a/src/core/api/app/billing/serializers.py b/src/core/api/app/billing/serializers.py new file mode 100644 index 00000000..91a9cdea --- /dev/null +++ b/src/core/api/app/billing/serializers.py @@ -0,0 +1,92 @@ +from billing.models import Plan +from clusters.request_utils import get_cluster +from django.contrib.auth.models import Group +from rest_framework import serializers +from users.models import Account, ExtendedGroup + +from . 
import manager + + +class PlanSerializer(serializers.ModelSerializer): + class Meta: + model = Plan + fields = ( + "name", + "slug", + "billing_period", + "trial_period_days", + "kind", + ) + + +class AccountSubscriptionSerializer(serializers.Serializer): + """This serializer can be used to create a new account + stripe subscription, or add + a stripe subscription to an existing account""" + + account_slug = serializers.CharField(required=False) + name = serializers.CharField(required=False) + plan = serializers.CharField() + variant = serializers.CharField(default="standard") + billing_period = serializers.CharField(write_only=True) + checkout_session_url = serializers.SerializerMethodField() + + def validate(self, attrs): + request = self.context["request"] + cluster = get_cluster(request) + if not cluster.is_feature_enabled("accounts_signup"): + raise serializers.ValidationError("Accounts provisioning is not supported") + if not cluster.is_feature_enabled("admin_billing"): + raise serializers.ValidationError( + "Accounts billing is temporarily disabled" + ) + account_slug = attrs.get("account_slug") + if account_slug: + account = Account.objects.get(slug=account_slug) + if account.subscription_id: + raise serializers.ValidationError( + "This account already has an active subscription" + ) + else: + # Account is new + active_accounts = Account.objects.active_accounts().count() + max_accounts = cluster.all_limits["max_cluster_active_accounts"] + if active_accounts >= max_accounts: + raise serializers.ValidationError( + "Accounts can't be created at the moment." + ) + return attrs + + def create(self, validated_data): + account_slug = validated_data.get("account_slug") + user = self.context.get("request").user + if account_slug: + account = Account.objects.get(slug=account_slug, created_by=user) + else: + account = Account.objects.create( + name=validated_data["name"], created_by=user + ) + self._add_user_to_account_groups(user, account) + + request = self.context["request"] + api_host = request.META["HTTP_HOST"] + domain = api_host.replace("api.", "") + plan_slug = f"{validated_data['plan']}-{validated_data['billing_period']}" + manager.create_checkout_session( + account, plan_slug, validated_data["variant"], domain + ) + return account + + def _add_user_to_account_groups(self, user, account): + """Add users to default account groups""" + groups = Group.objects.filter( + extended_group__account=account, + extended_group__role__in=[ + ExtendedGroup.Role.ROLE_ACCOUNT_ADMIN, + ExtendedGroup.Role.ROLE_DEFAULT, + ], + ) + for group in groups: + user.groups.add(group) + + def get_checkout_session_url(self, instance: Account): + return instance.settings["last_checkout_session"]["url"] diff --git a/src/core/api/app/billing/signals.py b/src/core/api/app/billing/signals.py new file mode 100644 index 00000000..863e1ceb --- /dev/null +++ b/src/core/api/app/billing/signals.py @@ -0,0 +1,122 @@ +from django.contrib.auth.models import Permission +from django.core import serializers +from django.db.models.signals import m2m_changed, post_save, pre_delete, pre_save +from django.dispatch import receiver +from projects.models import Environment +from users.models import Account, Group, User + +from lib.utils import m2m_changed_subjects_and_objects + +from .models import Event + + +@receiver( + m2m_changed, + sender=User.groups.through, + dispatch_uid="billing.handle_user_groups_changed", +) +def handle_user_groups_changed(sender, **kwargs): + users, group_pks = m2m_changed_subjects_and_objects(kwargs) + 
action = kwargs["action"] + if action in ("post_remove", "post_add"): + permissions_granted_by_groups = Permission.objects.filter( + group__in=group_pks, + name__contains="|workbench:", + ).values_list("name", flat=True) + accounts = Account.from_permission_names(permissions_granted_by_groups) + + for user in users: + # We don't want to bill datacoves superusers + if user.is_superuser: + continue + context = { + "user": serializers.serialize("python", [user])[0], + "groups": list(group_pks), + } + if action == "post_add": + for account in accounts: + Event.objects.track(account, Event.GROUPS_ADDED, context) + else: + for account in accounts: + Event.objects.track(account, Event.GROUPS_DELETED, context) + + +@receiver( + m2m_changed, + sender=Group.permissions.through, + dispatch_uid="billing.handle_group_permissions_changed", +) +def handle_group_permissions_changed(sender, **kwargs): + groups, permission_pks = m2m_changed_subjects_and_objects(kwargs) + action = kwargs["action"] + if action in ("post_remove", "post_add"): + names = Permission.objects.filter( + pk__in=permission_pks, + name__contains="|workbench:", + ).values_list("name", flat=True) + + accounts = Account.from_permission_names(names) + + for group in groups: + context = { + "group": serializers.serialize("python", [group])[0], + "permissions": list(permission_pks), + } + if action == "post_add": + for account in accounts: + Event.objects.track(account, Event.PERMISSIONS_ADDED, context) + else: + for account in accounts: + Event.objects.track(account, Event.PERMISSIONS_DELETED, context) + + +@receiver( + post_save, sender=Environment, dispatch_uid="billing.handle_environment_post_save" +) +def handle_environment_post_save(sender, **kwargs): + env = kwargs["instance"] + if kwargs["created"]: + Event.objects.track( + env.project.account, + Event.ENV_CREATED, + { + "id": env.id, + "services": list(env.enabled_and_valid_services()), + }, + ) + + +@receiver( + pre_save, sender=Environment, dispatch_uid="billing.handle_environment_pre_save" +) +def handle_environment_pre_save(sender, **kwargs): + env = kwargs["instance"] + if env.pk: + old_env = Environment.objects.get(pk=env.pk) + if env.enabled_and_valid_services() != old_env.enabled_and_valid_services(): + Event.objects.track( + env.project.account, + Event.SERVICES_CHANGED, + { + "id": env.id, + "services": list(env.enabled_and_valid_services()), + "previous_services": list(old_env.enabled_and_valid_services()), + }, + ) + + +@receiver( + pre_delete, + sender=Environment, + dispatch_uid="billing.handle_environment_pre_delete", +) +def handle_environment_pre_delete(sender, **kwargs): + env = kwargs["instance"] + Event.objects.track( + env.project.account, + Event.ENV_DELETED, + { + "id": env.id, + "services": list(env.enabled_and_valid_services()), + }, + ) diff --git a/src/core/api/app/billing/slack.py b/src/core/api/app/billing/slack.py new file mode 100644 index 00000000..5064faa0 --- /dev/null +++ b/src/core/api/app/billing/slack.py @@ -0,0 +1,27 @@ +from django.conf import settings + +from lib.slack import post_slack_message + + +def report_event_to_slack(event, cluster): + label = "[TEST] " if cluster.is_local else "" + text = f"{label}A new billing event requiring approval was just created." 
+ blocks = [ + { + "type": "section", + "text": {"text": text, "type": "mrkdwn"}, + "fields": [ + {"type": "mrkdwn", "text": "*Account*"}, + {"type": "mrkdwn", "text": "*Event Type*"}, + {"type": "plain_text", "text": event.account.name}, + {"type": "plain_text", "text": event.get_event_type_display()}, + ], + "accessory": { + "type": "button", + "text": {"type": "plain_text", "text": "Review events"}, + "url": f"https://api.{cluster.domain}/panel/billing" + "/event/?approval_status__exact=P", + }, + } + ] + post_slack_message(settings.SLACK_BILLING_CHANNEL, text, blocks) diff --git a/src/core/api/app/billing/tasks.py b/src/core/api/app/billing/tasks.py new file mode 100644 index 00000000..68e2d218 --- /dev/null +++ b/src/core/api/app/billing/tasks.py @@ -0,0 +1,86 @@ +from datetime import timedelta + +from billing import manager +from billing.models import Tally, TallyMark +from clusters import prometheus +from django.conf import settings +from projects.models import Environment +from users.models import Account + +from datacoves.celery import app +from lib import utils + + +@app.task +def inform_billing_events(): + manager.inform_billing_events() + + +@app.task +def tally_resource_usage(): + for account_slug in Account.objects.all().values_list("slug", flat=True): + tally_account_resource_usage.delay(account_slug) + + +@app.task +def tally_account_resource_usage(account_slug): + # Using account_slug to ease debugging on flower + tally_airflow_workers_usage(account_slug) + tally_airbyte_workers_usage(account_slug) + account = Account.objects.only("subscription").get(slug=account_slug) + if account.subscription and settings.BILLING_ENABLED: + report_usage_to_stripe.delay(account_slug) + + +@app.task +def report_usage_to_stripe(account_slug): + if not settings.TALLY_START: + return + manager.report_usage_to_stripe(account_slug) + + +def tally_airflow_workers_usage(account_slug): + name = settings.TALLY_AIRFLOW_WORKERS_NAME + tally_workers_usage(account_slug, name, "airflow-worker", ".+", container="base") + + +def tally_airbyte_workers_usage(account_slug): + name = settings.TALLY_AIRBYTE_WORKERS_NAME + tally_workers_usage(account_slug, name, "airbyte", "worker-pod|job-pod") + + +def tally_workers_usage( + account_slug, tally_name, pod_label, pod_label_regex, container=None +): + if not settings.TALLY_START: + return + yesterday = utils.yesterday() + first_day = max(settings.TALLY_START, yesterday - settings.TALLY_WINDOW) + for env in Environment.objects.filter(project__account__slug=account_slug): + tally, _ = Tally.objects.update_or_create( + account_id=env.project.account_id, + project_id=env.project_id, + environment=env, + name=tally_name, + defaults={"period": timedelta(days=1)}, + ) + days_with_marks = [ + d.date() + for d in tally.marks.filter( + time__gte=first_day, time__lte=yesterday + ).values_list("time", flat=True) + ] + day = yesterday + while day >= first_day: + if day.date() not in days_with_marks: + total = prometheus.get_by_label_pods_running_day_total_seconds( + day, + env.k8s_namespace, + pod_label, + pod_label_regex, + container=container, + ) + tm, _ = TallyMark.objects.update_or_create( + tally=tally, time=day, defaults={"amount": total} + ) + day -= timedelta(days=1) diff --git a/src/core/api/app/billing/tests.py b/src/core/api/app/billing/tests.py new file mode 100644 index 00000000..84c8807b --- /dev/null +++ b/src/core/api/app/billing/tests.py @@ -0,0 +1,404 @@ +from datetime import datetime, timedelta +from unittest.mock import patch + +from 
dateutil.relativedelta import relativedelta +from django.conf import settings +from django.test import TestCase +from django.utils import timezone +from dotmap import DotMap +from factories import ( + AccountFactory, + ClusterFactory, + EnvironmentFactory, + PlanFactory, + ProductFactory, + ProjectFactory, + TallyFactory, + TallyMarkFactory, + UserFactory, + items, +) + +from lib import utils + +from .manager import create_checkout_session, report_usage_to_stripe +from .models import Event, Plan, TallyMark + + +class KubectlMock: + """Mock class to Kubectl client""" + + def get_ingress_controller_ips(self): + return "10.0.0.10", "192.168.100.10" + + def get_cluster_apiserver_ips(self): + return {} + + +class CeleryInspectMock: + """Mock class to Celery Inspect""" + + def reserved(self): + return {} + + +customer_data = { + "id": "cus_P5kbR1mwSv8j4x", + "object": "customer", + "address": None, + "balance": 0, + "created": 1701207936, + "currency": None, + "default_currency": None, + "default_source": None, + "delinquent": False, + "description": None, + "discount": None, + "email": "test@datacoveslocal.com", + "invoice_prefix": "65A2AE89", + "invoice_settings": { + "custom_fields": None, + "default_payment_method": None, + "footer": None, + "rendering_options": None, + }, + "livemode": False, + "metadata": {}, + "name": "test-2", + "next_invoice_sequence": 1, + "phone": None, + "preferred_locales": [], + "shipping": None, + "tax_exempt": "none", + "test_clock": None, +} + + +session_data = { + "customer_update": {"address": "auto", "name": "auto"}, + "automatic_tax": {"enabled": "True"}, + "cancel_url": "https://datacoveslocal.com/admin/billing/cancel", + "customer": "cus_P5kbR1mwSv8j4x", + "line_items": {"0": {"quantity": "1", "price": "price_1NxZJ8LF8qmfSSrQgfUna6jl"}}, + "success_url": "https://datacoveslocal.com/admin/billing/checkout?session_id={CHECKOUT_SESSION_ID}", + "mode": "subscription", + "subscription_data": {"metadata": {"plan": "growth-monthly"}}, +} + +subscription_data = { + "id": "sub_1NwqcHLF8qmfSSrQjddq25wG", + "plan": None, + "items": [ + { + "id": "si_OqfzMrWVnSFVpr", + "plan": { + "id": "price_1M5F43LF8qmfSSrQ4YMKtmwH", + "active": True, + "amount": 10, + "object": "plan", + "created": 1668718323, + "product": "prod_MosppM3RQpT7a8", + "currency": "usd", + "interval": "month", + "livemode": True, + "metadata": {}, + "nickname": "pro", + "usage_type": "metered", + "amount_decimal": "10", + "billing_scheme": "per_unit", + "interval_count": 1, + "aggregate_usage": "sum", + }, + "price": { + "id": "price_1M5F43LF8qmfSSrQ4YMKtmwH", + "type": "recurring", + "active": True, + "object": "price", + "created": 1668718323, + "product": "prod_MosppM3RQpT7a8", + "currency": "usd", + "metadata": {}, + "nickname": "pro", + "recurring": { + "interval": "month", + "usage_type": "metered", + "interval_count": 1, + "aggregate_usage": "sum", + }, + "unit_amount": 10, + "tax_behavior": "exclusive", + "billing_scheme": "per_unit", + "unit_amount_decimal": "10", + }, + "object": "subscription_item", + "created": 1697731200, + "metadata": {}, + "tax_rates": [], + "subscription": "sub_1NwqcHLF8qmfSSrQjddq25wG", + } + ], + "object": "subscription", + "status": "active", + "created": 1696270393, + "currency": "usd", + "customer": "cus_OkLIFJdhwTvFfC", + "ended_at": None, + "metadata": {"plan": "growth-monthly"}, + "start_date": 1696270393, + "latest_invoice": "in_1OIxibLF8qmfSSrQ9PzQnJeU", + "trial_settings": {"end_behavior": {"missing_payment_method": "create_invoice"}}, + "collection_method": 
"charge_automatically", + "default_tax_rates": [], + "current_period_end": 1704219193, + "billing_cycle_anchor": 1696270393, + "cancel_at_period_end": False, + "current_period_start": 1701540793, + "default_payment_method": "pm_1NwqcGLF8qmfSSrQNAO8taiA", +} + + +class BillingTests(TestCase): + """ + Test TallyMark different scenarios: + TODO: mock stripe so that calling Stripe when amount > 0 can be tested + """ + + @patch("lib.kubernetes.client.Kubectl", return_value=KubectlMock()) + @patch("datacoves.celery.app.control.inspect", return_value=CeleryInspectMock()) + @patch("billing.manager._get_si_for_tally", return_value=[]) + def setUp(self, mock_si, mock_inspect, mock_kubernetes) -> None: + self.cluster = ClusterFactory.create() + self.project = ProjectFactory.create() + self.plan = PlanFactory.create() + self.product = ProductFactory.create() + self.project.plan = self.plan + self.account = self.project.account + self.account.plan = self.plan + + enabled = {"valid": True, "enabled": True, "unmet_preconditions": []} + services_data = { + "airbyte": {"enabled": False}, + "airflow": enabled, + "dbt-docs": enabled, + "superset": {"enabled": False}, + "code-server": enabled, + } + internal_services_data = {"minio": {"enabled": True}} + self.environment = EnvironmentFactory.create( + cluster=self.cluster, + project=self.project, + services=services_data, + internal_services=internal_services_data, + ) + + def test_tally_mark_in_current_period_with_usage(self): + """ + Happy path: + subscribed date < usage date + current_period_start < usage date + """ + self._create_tallies(self.account) + start_date = timezone.now() - timedelta(days=2) + current_period_start = start_date + current_period_end = current_period_start + relativedelta(months=1) + self._set_account_subscription( + start_date, current_period_start, current_period_end + ) + report_usage_to_stripe(self.account.slug) + + tally_mark = TallyMark.objects.first() + self.assertIs(tally_mark.status, Event.STATUS_PROCESSED) + self._destroy_tallies() + + def test_tally_mark_before_subscription(self): + """ + Ignore tally mark: + subscribed date > usage date + """ + self._create_tallies(self.account) + start_date = timezone.now() + current_period_start = start_date + current_period_end = current_period_start + relativedelta(months=1) + self._set_account_subscription( + start_date, current_period_start, current_period_end + ) + report_usage_to_stripe(self.account.slug) + tally_mark = TallyMark.objects.first() + self.assertIs(tally_mark.status, Event.STATUS_IGNORED) + self._destroy_tallies() + + def test_tally_mark_after_period_started(self): + """ + Tally Mark updates time to be after current period started: + subscribed date < usage date + current_period_start > usage date + """ + self._create_tallies(self.account) + self._create_tally_for_duplicate_index() + + start_date = timezone.now() - relativedelta(months=1) + current_period_start = timezone.now() + current_period_end = current_period_start + relativedelta(months=1) + self._set_account_subscription( + start_date, current_period_start, current_period_end + ) + report_usage_to_stripe(self.account.slug) + for tally_mark in TallyMark.objects.all(): + self.assertIs(tally_mark.status, Event.STATUS_PROCESSED) + self.assertGreaterEqual(tally_mark.time, current_period_start) + + self._destroy_tallies() + self.tally_mark_duplicate_index.delete() + + def test_current_period_in_sync(self): + """ + Datacoves current period and Stripe current period need to be in sync. 
+ 'current_period_end' cannot be in the past. Happy case. + """ + current_period_start = datetime.now( + timezone.get_default_timezone() + ) - timedelta(days=15) + start_date = current_period_start - relativedelta(months=1) + current_period_end = current_period_start + relativedelta(months=1) + self._set_account_subscription( + start_date, current_period_start, current_period_end + ) + self.account.subscription = subscription_data + self._create_tallies(self.account) + report_usage_to_stripe(self.account.slug) + tally_mark = TallyMark.objects.first() + self.assertIs(tally_mark.status, Event.STATUS_PROCESSED) + self.assertGreaterEqual(tally_mark.time, current_period_start) + self._destroy_tallies() + + def test_current_period_out_sync(self): + """ + Datacoves current period and Stripe current period need to be in sync. + 'current_period_end' cannot be in the past. Error case. + """ + self.account.plan = PlanFactory.create( + slug="test_current_period_out_sync", kind=Plan.KIND_GROWTH + ) + current_period_start = datetime.now( + timezone.get_default_timezone() + ) - relativedelta(months=2) + current_period_end = current_period_start + relativedelta(months=1) + self._set_account_subscription( + current_period_start, current_period_start, current_period_end + ) + self._create_tallies(self.account) + self.assertRaises(AssertionError, report_usage_to_stripe, self.account.slug) + self._destroy_tallies() + + def test_current_period_out_sync_custom_plan(self): + """ + Datacoves current period and Stripe current period need to be in sync. + 'current_period_end' does not apply for custom plan. + """ + self.account.plan = PlanFactory.create( + slug="test_current_period_out_sync_custom_plan", kind=Plan.KIND_CUSTOM + ) + current_period_start = datetime.now( + timezone.get_default_timezone() + ) - relativedelta(months=2) + current_period_end = current_period_start + relativedelta(months=1) + self._set_account_subscription( + current_period_start, current_period_start, current_period_end + ) + self._create_tallies(self.account) + report_usage_to_stripe(self.account.slug) + tally_mark = TallyMark.objects.first() + self.assertIs(tally_mark.status, Event.STATUS_IGNORED) + self._destroy_tallies() + + @patch("datacoves.celery.app.control.inspect", return_value=CeleryInspectMock()) + def _set_account_subscription( + self, + start_date, + current_period_start, + current_period_end, + mock_inspect, + plan=None, + ): + if plan: + self.account.plan = plan + self.account.plan.save() + + self.account.subscription = { + "start_date": datetime.timestamp(start_date), + "current_period_start": datetime.timestamp(current_period_start), + "current_period_end": datetime.timestamp(current_period_end), + "items": items, + } + self.account.save() + + def _create_tallies(self, account): + self.tally = TallyFactory.create( + account=account, + project=self.project, + environment=self.environment, + name=settings.TALLY_AIRFLOW_WORKERS_NAME, + period=timedelta(days=1), + ) + self.tally_mark = TallyMarkFactory.create( + tally=self.tally, + time=utils.yesterday(), + amount=0, + status=Event.STATUS_PENDING, + ) + + def _create_tally_for_duplicate_index(self): + self.tally_mark_duplicate_index = TallyMarkFactory.create( + tally=self.tally, + time=utils.yesterday() + timedelta(seconds=1), + amount=0, + status=Event.STATUS_PENDING, + ) + + def _destroy_tallies(self): + self.tally.delete() + self.tally_mark.delete() + + @patch("stripe.checkout.Session.create") + @patch("stripe.Customer.create") + 
@patch("datacoves.celery.app.control.inspect") + def test_create_checkout_session_no_customer( + self, inspect_mock, create_customer_mock, create_checkout_session_mock + ): + inspect_mock.return_value = CeleryInspectMock() + create_customer_mock.return_value = DotMap(customer_data) + create_checkout_session_mock.return_value = DotMap(session_data) + + plan_slug = "growth-monthly" + PlanFactory.create(slug=plan_slug) + user = UserFactory.create() + account = AccountFactory.build(created_by=user) + variant = "standard" + domain = "api.datacoveslocal.com" + create_checkout_session(account, plan_slug, variant, domain) + self.assertIs( + account.settings["last_checkout_session"]["customer"], "cus_P5kbR1mwSv8j4x" + ) + + @patch("stripe.checkout.Session.create") + @patch("stripe.Customer.retrieve") + @patch("datacoves.celery.app.control.inspect") + def test_create_checkout_session_customer_exists( + self, inspect_mock, retrieve_customer_mock, create_checkout_session_mock + ): + inspect_mock.return_value = CeleryInspectMock() + retrieve_customer_mock.return_value = DotMap(customer_data) + create_checkout_session_mock.return_value = DotMap(session_data) + + plan_slug = "growth-monthly" + PlanFactory.create(slug=plan_slug) + user = UserFactory.create() + account = AccountFactory.build( + created_by=user, customer_id="cus_P5kbR1mwSv8j4x" + ) + domain = "api.datacoveslocal.com" + variant = "standard" + create_checkout_session(account, plan_slug, variant, domain) + self.assertIs( + account.settings["last_checkout_session"]["customer"], "cus_P5kbR1mwSv8j4x" + ) diff --git a/src/core/api/app/billing/views.py b/src/core/api/app/billing/views.py new file mode 100644 index 00000000..f4c2f8b6 --- /dev/null +++ b/src/core/api/app/billing/views.py @@ -0,0 +1,67 @@ +import json +import logging +from collections import OrderedDict + +import stripe +from billing import manager # noqa: F401. Imported to configure stripe. +from billing.serializers import AccountSubscriptionSerializer +from core.mixins.views import VerboseCreateModelMixin +from django.conf import settings +from django.views.decorators.csrf import csrf_exempt +from rest_framework import generics +from rest_framework.decorators import api_view, renderer_classes +from rest_framework.permissions import IsAuthenticated +from rest_framework.renderers import JSONRenderer +from rest_framework.response import Response + + +class SubscribeAccount(VerboseCreateModelMixin, generics.CreateAPIView): + serializer_class = AccountSubscriptionSerializer + permission_classes = [IsAuthenticated] + + +@api_view(("POST",)) +@renderer_classes((JSONRenderer,)) +@csrf_exempt +def stripe_webhook(request): + payload = request.body + webhook_secret = settings.STRIPE_WEBHOOK_SECRET + event = None + try: + if webhook_secret: + signature = request.headers.get("Stripe-Signature") + event = stripe.Webhook.construct_event(payload, signature, webhook_secret) + else: + logging.warning( + "stripe_webhook: won't verify request, STRIPE_WEBHOOK_SECRET not set." + ) + # The rest of this block is the stripe.Webhook.construct_event source + # code without the line that does signature verification. 
+ if hasattr(payload, "decode"): + payload = payload.decode("utf-8") + data = json.loads(payload, object_pairs_hook=OrderedDict) + event = stripe.Event.construct_from(data, stripe.api_key) + except ValueError: + logging.error("stripe_webhook: bad request") + return Response(status=400) + except stripe.error.SignatureVerificationError: + logging.error("stripe_webhook: signature verification error") + return Response(status=400) + + # Seems like too much data to save it all, and too little context to + # decide what is relevant. Let each handlers do the logging instead. + # log_stripe_event(type=event.type, id=event.id, **event.data) + + handler = getattr(manager, "handle_" + event.type.replace(".", "_"), None) + if not handler: + # log_stripe_event(type=event.type, data=event.data, id=event.id, handled=False) + # TODO: It doesn't seem wise to respond OK to all events without + # handling them. Uncomment the following line and fix the consequences. + # Configure stripe so we don't get sent the webhooks we don't care about. + return Response({}, status=501) + # return Response({"status": "success"}) + response = handler(event) + # So that handlers don't have to return responses if they don't care. + response = response or Response({"status": "success"}) + response = Response(response) if isinstance(response, dict) else response + return response diff --git a/src/core/api/app/clusters/__init__.py b/src/core/api/app/clusters/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/clusters/adapters/__init__.py b/src/core/api/app/clusters/adapters/__init__.py new file mode 100644 index 00000000..15a04328 --- /dev/null +++ b/src/core/api/app/clusters/adapters/__init__.py @@ -0,0 +1,616 @@ +import tempfile +from typing import Optional + +import boto3 +from boto3.exceptions import S3UploadFailedError +from clusters.models import Cluster +from clusters.tasks import setup_db_read_only_for_service +from django.conf import settings +from django.utils import timezone +from oauth2_provider.generators import generate_client_id, generate_client_secret +from oauth2_provider.models import Application +from packaging import version +from projects.git import try_git_clone +from projects.models import Environment, UserEnvironment +from users.models import User + +from lib.config import config as the + +from ..external_resources.postgres import create_read_only_user_for_service + + +class Adapter: + """WARNING: Be mindful that these methods may be called in a loop, + such as by workspace.sync. Thus, it is very easy to accidentally + create a situation where one of these methods creates a performance + issue. + + You can add to the select_related / prefetch_related fields in + workspace.SyncTask and use the is_relation_cached to use pre-fetched + data instead of running queries in these methods. 
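+
+    Subclasses normally override the class attributes declared just below
+    (service_name, deployment_name, supported_integrations, chart_features)
+    and implement gen_resources() to emit the Kubernetes objects their
+    service needs.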
+ """ + + service_name = None + linked_service_names = [] + deployment_name = None + supported_integrations = [] + chart_features = {} + + GENERAL_NODE_SELECTOR = the.GENERAL_NODE_SELECTOR + WORKER_NODE_SELECTOR = the.WORKER_NODE_SELECTOR + VOLUMED_NODE_SELECTOR = the.VOLUMED_NODE_SELECTOR + + @classmethod + def _normalize_name(cls, name: str) -> str: + if name: + return name.replace(" ", "-").lower() + + return name + + @classmethod + def config_attr(cls) -> str: + return f"{cls.service_name.replace('-', '_')}_config" + + @classmethod + def is_enabled(cls, env: Environment) -> bool: + "Returns if service is enabled" + return cls.always_enabled or env.is_service_enabled(cls.service_name) + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + "Returns the list of resources to be created on kubernetes" + raise NotImplementedError() + + @classmethod + def remove_resources(cls, env: Environment) -> list: + "Returns the list of resources to be removed on kubernetes" + return [] + + @classmethod + def _get_labels_adapter(cls, query_format=False, exclude=False) -> any: + """Returns labels to add to K8s resources. + Args: + query_format (bool, optional): if it is true change the format to key=value . Defaults to False. + Returns: + dict: if query_format is False + str: if query_format is True + """ + label_value = cls.service_name if cls.service_name else "" + labels = {"datacoves.com/adapter": label_value} + if query_format: + operator = "notin" if exclude else "in" + labels = ",".join([f"{k} {operator} ({v})" for k, v in labels.items()]) + + return labels + + @classmethod + def get_cluster_default_config(cls, cluster: Cluster, source: dict = None) -> dict: + """Returns the default config + Args: + cluster (Cluster): Current cluster + source (dict, optional): Config from some source. Defaults to None. + Returns: + dict: Config + """ + try: + adapter_config_attr = cls.config_attr() + config = getattr(cluster, adapter_config_attr) + config = {} if config is None else config.copy() + if source: + config.update(source) + + return config + except AttributeError: + return {} + + @classmethod + def get_oidc_groups(cls, env: Environment, user): + """Returns the oidc groups needed by the service to grant roles and permissions""" + return [] + + +class EnvironmentAdapter(Adapter): + """WARNING: Be mindful that these methods may be called in a loop, + such as by workspace.sync. Thus, it is very easy to accidentally + create a situation where one of these methods creates a performance + issue. + + You can add to the select_related / prefetch_related fields in + workspace.SyncTask and use the is_relation_cached to use pre-fetched + data instead of running queries in these methods. + """ + + @classmethod + def get_default_values(cls, env=None) -> dict: + """Returns defaults values for adapter config, useful specially on front end forms""" + return {} + + @classmethod + def is_enabled(cls, env: Environment) -> bool: + "Returns if service is enabled" + return env.is_service_enabled(cls.service_name) + + @classmethod + def is_enabled_and_valid(cls, env: Environment) -> bool: + "Returns if service is enabled" + return env.is_service_enabled_and_valid(cls.service_name) + + @classmethod + def sync_external_resources(cls, env: Environment): + "Creates external resources if necessary and updates the env config accordingly." 
+ return + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + return {} + + @classmethod + def get_unmet_preconditions(cls, env: Environment): + "Returns a list of preconditions that where not met." + return [] + + @classmethod + def get_user_unmet_preconditions(cls, ue: UserEnvironment): + """Returns a list of user preconditions that where not met.""" + return [] + + @classmethod + def get_user_unmet_preconditions_bulk(cls, ue_list) -> dict: + """Does get_user_unmet_preconditions, except optimized for bulk + results. Maps the UserEnvironment.id to the results of + get_user_unmet_preconditions. + + By default, this operates in a simple loop. + """ + + return {ue.id: cls.get_user_unmet_preconditions(ue) for ue in ue_list} + + @classmethod + def get_user_linked_services_unmet_preconditions( + cls, service_name: str, ue: UserEnvironment + ): + """Returns a list of user preconditions that where not met. + + Be aware that this method has the potential to impact workspace.sync + as it is run in a loop. It is currently unused in any particular + way, but if in the future it is used, there should be a _bulk version + made if database queries or other resource heavy activities are + anticipated. + """ + return [] + + @classmethod + def get_datacoves_versions(cls, env: Environment) -> dict: + """Return Datacoves and Environment release versions""" + return { + "DATACOVES__VERSION": env.cluster.release.name, + "DATACOVES__VERSION_MAJOR_MINOR": ".".join( + env.cluster.release.version_components[:2] + ), + "DATACOVES__VERSION__ENV": env.release.name, + "DATACOVES__VERSION_MAJOR_MINOR__ENV": ".".join( + env.release.version_components[:2] + ), + "DATACOVES__SQLFLUFF_VERSION": "3.1.1", + } + + @classmethod + def _external_db_config_unmet_preconditions(cls, config: dict) -> list: + db_config = config["db"] + if not db_config.get("external", False): + return [] + + is_valid = ( + "host" in db_config + and "user" in db_config + and "password" in db_config + and "database" in db_config + ) or "connection" in db_config + unmets = [] + if not is_valid: + unmets.append( + { + "code": "missing_keys_in_external_db_config", + "message": "Missing 'host', 'user', 'password', 'database', " + "or 'connection' in db connection string", + } + ) + return unmets + + @classmethod + def _check_write_access(cls, s3_client, bucket_name): + with tempfile.NamedTemporaryFile(mode="w") as tmp_file: + tmp_file.write("Test S3 Upload") + test_object_key = "test_object.txt" + s3_client.upload_file(tmp_file.name, bucket_name, test_object_key) + s3_client.delete_object(Bucket=bucket_name, Key=test_object_key) + + @classmethod + def _external_logs_config_unmet_preconditions( + cls, config: dict, env: Environment + ) -> list: + if not config["logs"].get("external", False): + return [] + + logs_config = config["logs"] + backend = logs_config["backend"] + cluster_provider: str = env.cluster.provider + logs_unmets = { + Cluster.LOGS_BACKEND_EFS: cls._external_logs_config_unmet_preconditions_efs, + Cluster.LOGS_BACKEND_AFS: cls._external_logs_config_unmet_preconditions_afs, + Cluster.LOGS_BACKEND_S3: cls._external_logs_config_unmet_preconditions_s3, + Cluster.LOGS_BACKEND_NFS: cls._external_logs_config_unmet_preconditions_nfs, + } + + if backend not in logs_unmets.keys(): + return [ + { + "code": "invalid_config_in_external_logs", + "message": f"Missing valid log configuration for '{backend}'", + } + ] + + return logs_unmets[backend](logs_config, cluster_provider) + + @classmethod + def 
_external_logs_config_unmet_preconditions_efs( + cls, config: dict, cluster_provider: str + ) -> list: + unmets = [] + if "volume_handle" not in config: + unmets.append( + { + "code": "no_volume_handle_in_external_logs_config", + "message": "Missing 'volume_handle' in EFS configuration", + } + ) + + if cluster_provider not in (Cluster.EKS_PROVIDER, Cluster.KIND_PROVIDER): + unmets.append( + { + "code": "provider_wrong_in_external_logs_config", + "message": "EFS invalid cluster provider", + } + ) + return unmets + + @classmethod + def _external_logs_config_unmet_preconditions_afs( + cls, config: dict, cluster_provider: str + ) -> list: + unmets = [] + if cluster_provider not in (Cluster.AKS_PROVIDER, Cluster.KIND_PROVIDER): + unmets.append( + { + "code": "provider_wrong_in_external_logs_config", + "message": "Azure File invalid cluster provider", + } + ) + return unmets + + @classmethod + def _external_logs_config_unmet_preconditions_nfs( + cls, config: dict, cluster_provider: str + ) -> list: + unmets = [] + if cluster_provider != Cluster.KIND_PROVIDER: + unmets.append( + { + "code": "provider_wrong_in_external_logs_config", + "message": "NFS invalid cluster provider", + } + ) + return unmets + + @classmethod + def _external_logs_config_unmet_preconditions_s3( + cls, config: dict, cluster_provider: str + ) -> list: + unmets = [] + if not ( + "s3_log_bucket" in config + and "access_key" in config + and "secret_key" in config + ): + unmets.append( + { + "code": "missing_keys_in_external_logs_config", + "message": "Missing 's3_log_bucket'," + " 'access_key' or 'secret_key' in S3 logs configuration", + } + ) + else: + # Check write access + try: + s3_client = boto3.client( + "s3", + aws_access_key_id=config.get("access_key"), + aws_secret_access_key=config.get("secret_key"), + ) + bucket_name = config.get("s3_log_bucket") + cls._check_write_access(s3_client, bucket_name) + except S3UploadFailedError: + unmets.append( + { + "code": "invalid_s3_sync_config_using_iam_user", + "message": "Unable to write S3 objects", + } + ) + except Exception as exc: + unmets.append( + { + "code": "invalid_s3_sync_config_using_iam_user", + "message": str(exc), + } + ) + return unmets + + @classmethod + def get_internal_service_config(cls, env: Environment, name: str) -> dict: + """Configuration to be used by internal services, i.e. minio""" + return None + + @classmethod + def get_public_url(cls, env: Environment): + subdomain = cls.subdomain.format(env_slug=env.slug) + return f"https://{subdomain}.{env.cluster.domain}" + + @classmethod + def get_oidc_config( + cls, env: Environment, path: str, service_name: str = None, user: User = None + ) -> dict: + """If not provided, service_name will be derived from the class' + service_name field (default behavior). Otherwise, we will use the + provided service name. 
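+
+        Note that the backing OAuth Application named
+        "<env_slug>-<service_name>" is deleted and recreated below, so the
+        client id and secret rotate each time this method is called.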
+ + If 'user' is provided, it will be passed to get_public_url + """ + + if not user: + public_url = cls.get_public_url(env) + else: + public_url = cls.get_public_url(env, user) + + client_secret = generate_client_secret() + client_id = generate_client_id() + + # this is necessary to avoid installing a self-signed cert on airflow/superset + # requires signing in on a separate tab + scheme = "http" if env.cluster.is_local else "https" + + if service_name is not None: + name = f"{env.slug}-{service_name}" + else: + name = f"{env.slug}-{cls.service_name}" + + Application.objects.filter(name=name).delete() + Application.objects.create( + name=name, + client_secret=client_secret, + client_id=client_id, + client_type="confidential", + authorization_grant_type="authorization-code", + redirect_uris=(public_url + path), + algorithm="RS256", + skip_authorization=True, + ) + + return { + "idp_provider": "datacoves", + "idp_provider_url": f"{scheme}://api.{env.cluster.domain}/auth", + "idp_client_id": client_id, + "idp_client_secret": client_secret, + "idp_scopes": list(settings.OAUTH2_PROVIDER["SCOPES"].keys()), + } + + @classmethod + def get_cluster_oidc_config(cls, service_name, subdomain, path: str) -> dict: + cluster = Cluster.objects.current().first() + public_url = f"https://{subdomain}.{cluster.domain}" + # + client_secret = generate_client_secret() + client_id = generate_client_id() + + # this is necessary to avoid installing a self-signed cert on grafana + # requires signing in on a separate tab + scheme = "http" if cluster.is_local else "https" + + name = f"cluster-{service_name}" + app, created = Application.objects.get_or_create( + name=name, + defaults={ + "client_secret": client_secret, + "client_id": client_id, + "client_type": "confidential", + "authorization_grant_type": "authorization-code", + "redirect_uris": (public_url + path), + "algorithm": "RS256", + "skip_authorization": True, + }, + ) + if created: + cluster.grafana_settings["oidc"] = {} + cluster.grafana_settings["oidc"]["client_secret"] = client_secret + cluster.save() + + return { + "idp_provider": "datacoves", + "idp_provider_url": f"{scheme}://api.{cluster.domain}", + "idp_client_id": app.client_id, + "idp_client_secret": cluster.grafana_settings["oidc"]["client_secret"], + "idp_scopes": list(settings.OAUTH2_PROVIDER["SCOPES"].keys()), + } + + @classmethod + def get_writable_config(cls, env: Environment) -> dict: + "Returns a dict containing the fields of the config dict a user could change" + raise NotImplementedError() + + @classmethod + def get_enabled_integrations(cls, env: Environment, type: str): + return env.integrations.filter(service=cls.service_name, integration__type=type) + + @staticmethod + def _get_git_creds(env: Environment): + """Warning: this refetches Azure credentials every time, which is + time-costly. It would be wiser in the future to use cached + credentials like the dynamic_repo_credentials method in the + projects view, but that polish can come later as we're not even + really using this feature yet. 
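+
+        Depending on the project's clone strategy, the returned dict carries
+        either an SSH git_url plus private deploy key, Azure DevOps OAuth
+        username/password, or plain git username/password credentials.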
+ """ + + project = env.project + + git_creds = {} + if project.clone_strategy == project.SSH_CLONE_STRATEGY: + git_creds["git_url"] = project.repository.git_url + git_creds["ssh_key_private"] = project.deploy_key.private + elif project.clone_strategy.startswith("azure"): + project.update_oauth_if_needed() + + git_creds["git_url"] = project.repository.url + git_creds["username"] = project.deploy_credentials["oauth_username"] + git_creds["password"] = project.deploy_credentials["oauth_password"] + + else: + git_creds["git_url"] = project.repository.url + git_creds["username"] = project.deploy_credentials["git_username"] + git_creds["password"] = project.deploy_credentials["git_password"] + + return git_creds + + @staticmethod + def _try_git_clone(env: Environment, branch: str, git_creds: dict): + try_git_clone( + env.project.clone_strategy, + git_creds["git_url"], + branch=branch, + ssh_key_private=git_creds.get("ssh_key_private"), + username=git_creds.get("username"), + password=git_creds.get("password"), + ) + + @classmethod + def _git_clone_unmet_precondition(cls, env: Environment): + config_attr = cls.service_name.replace("-", "_") + "_config" + config = getattr(env, config_attr) + try: + git_creds = cls._get_git_creds(env) + branch = config["git_branch"] + if ( + config.get("git_validated_at") + and config.get("git_validated_creds") == git_creds + and config.get("git_validated_branch") == branch + ): + return [] + cls._try_git_clone(env, branch, git_creds) + config.update( + { + "git_validated_at": str(timezone.now()), + "git_validated_creds": git_creds, + "git_validated_branch": branch, + } + ) + Environment.objects.filter(id=env.id).update(**{config_attr: config}) + return [] + except Exception as ex: + return [ + { + "code": cls.service_name + "_repository_not_valid", + "message": str(ex), + } + ] + + @classmethod + def _chart_version_unmet_precondition(cls, env: Environment): + chart_version = getattr(env.release, cls.service_name + "_chart")["version"] + if chart_version not in cls.chart_versions: + return [ + { + "code": cls.service_name + "_chart_version_not_supported", + "message": f"The chart version {chart_version} is not supported." + f" Supported versions: {cls.chart_versions}", + } + ] + else: + return [] + + @classmethod + def _is_feature_enabled(cls, feature, env): + """ + Returns true if the feature is enabled for the current version + """ + current_version = getattr(env.release, f"{cls.service_name}_chart")["version"] + current_parsed = version.parse(current_version) + target_version = cls.chart_features[feature] + operator, target_version = target_version.split(" ") + if operator == "<=": + return current_parsed <= version.parse(target_version) + elif operator == ">=": + return current_parsed >= version.parse(target_version) + elif operator == "==": + return current_parsed == version.parse(target_version) + elif operator == ">": + return current_parsed > version.parse(target_version) + elif operator == "<": + return current_parsed < version.parse(target_version) + else: + raise Exception(f"Invalid operator {operator}") + + @classmethod + def enable_service(cls, env: Environment, extra_config: list = None): + """This method is called for internal adapters to enable them if needed""" + pass + + @classmethod + def is_relation_cached(cls, model_obj, relation_name: str) -> bool: + """This checks to see if 'relation_name' is loaded in 'model_obj' + cache. 
This is useful for optimizations where we could loop over + cache instead of doing a SQL query in certain cases.""" + + # This functionality was moved to the model, and I will eventually + # remove its use from this classes that use this. + return model_obj.is_relation_cached(relation_name) + + @classmethod + def on_post_enabled(cls, env: Environment) -> dict: + return {} + + @classmethod + def _create_read_only_db_user( + cls, env: Environment, is_async=False + ) -> Optional[dict | None]: + if is_async: + setup_db_read_only_for_service.apply_async( + (env.slug, cls.service_name), countdown=15 + ) + + else: + config = getattr(env, cls.config_attr()) + # If the configuration already has a user configured, it is not configured again + if config and config.get("db_read_only"): + return config.get("db_read_only") + + if config and config.get("db") and config["db"]["external"]: + db_ro_data = create_read_only_user_for_service( + env=env, service_name=cls.service_name + ) + return db_ro_data + + return None + + @classmethod + def get_service_account_email(cls, env) -> str: + return f"{cls.service_name}-{env.slug}@{env.cluster.domain}" + + @classmethod + def setup_service_account(cls, env: Environment): + sa_user_email = cls.get_service_account_email(env) + sa_user, _ = User.objects.get_or_create( + email=sa_user_email, + defaults={ + "is_service_account": True, + "name": f"{cls.service_name} {env.slug} Service Account", + }, + ) + return sa_user diff --git a/src/core/api/app/clusters/adapters/airbyte.py b/src/core/api/app/clusters/adapters/airbyte.py new file mode 100644 index 00000000..a3dbbe5d --- /dev/null +++ b/src/core/api/app/clusters/adapters/airbyte.py @@ -0,0 +1,606 @@ +import secrets + +from clusters.models import Cluster +from django.conf import settings +from projects.models import Environment + +from lib.dicts import deep_merge +from lib.kubernetes import make + +from ..external_resources.postgres import create_database +from ..external_resources.s3 import create_bucket +from . 
import EnvironmentAdapter +from .minio import MinioAdapter + + +class AirbyteAdapter(EnvironmentAdapter): + service_name = settings.SERVICE_AIRBYTE + deployment_name = "{env_slug}-airbyte-server" + subdomain = "airbyte-{env_slug}" + chart_versions = ["0.48.8", "1.6.0"] + chart_features = { + # Custom registry for jobs + "jobs-custom-registry": "<= 0.48.8", + # Pods sweeper + "pod-sweeper": "<= 0.48.8", + # Airbyte URL + "airbyte-url": ">= 1.6.0", + # Global image registry + "global-image-registry": ">= 1.6.0", + # Internal / external DB + "internal-external-db": ">= 1.6.0", + # Storage refactor + "storage-refactor": ">= 1.6.0", + # Airbyte's own minio + "airbyte-minio": ">= 1.6.0", + "workload-launcher": ">= 1.6.0", + "connector-rollout-worker": ">= 1.6.0", + "airbyte-keycloak": ">= 1.6.0", + "workload-api-server": ">= 1.6.0", + } + + AIRBYTE_VALUES_CONFIG_MAP_NAME = "airbyte-values" + AIRBYTE_LOGS_SECRET_NAME = "airbyte-logs-secrets" + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + secrets = cls._gen_airbyte_secrets(env) + logs_secrets_name = secrets[0]["metadata"]["name"] + values = cls._gen_airbyte_values(env, logs_secrets_name) + + values_config_map = make.hashed_json_config_map( + name=cls.AIRBYTE_VALUES_CONFIG_MAP_NAME, + data={"values.yaml": values}, + labels=cls._get_labels_adapter(), + ) + + return [values_config_map] + secrets + + @classmethod + def sync_external_resources(cls, env: Environment): + cls._sync_external_dbs(env) + cls._sync_external_logs(env) + + @classmethod + def _sync_external_dbs(cls, env: Environment): + if not env.airbyte_config["db"].get("external", False): + return + + if not env.cluster.has_dynamic_db_provisioning(): + return + + already_configured = ( + cls._external_db_config_unmet_preconditions(env.airbyte_config) == [] + ) + if already_configured: + return + + db_data = create_database(env=env, db_name="airbyte", can_create_db=True) + env.airbyte_config["db"].update(db_data) + Environment.objects.filter(id=env.id).update(airbyte_config=env.airbyte_config) + + @classmethod + def _sync_external_logs(cls, env: Environment): + if not env.airbyte_config["logs"].get("external", False): + return + + if not env.cluster.has_dynamic_blob_storage_provisioning(): + return + + already_configured = ( + cls._external_logs_config_unmet_preconditions(env.airbyte_config, env) == [] + ) + if already_configured: + return + + logs_data = create_bucket(env, "airbyte") + env.airbyte_config["logs"].update(logs_data) + Environment.objects.filter(id=env.id).update(airbyte_config=env.airbyte_config) + + @classmethod + def get_cluster_default_config(cls, cluster: Cluster, source: dict = None) -> dict: + config = super().get_cluster_default_config(cluster=cluster, source=source) + config.update( + { + "logs": config.get( + "logs", {"external": False, "backend": Cluster.LOGS_BACKEND_S3} + ), + "db": config.get( + "db", + { + "external": False, + "backend": "postgres", + "tls": True, + "tls_enabled": True, + "tls_disable_host_verification": True, + "host_verification": False, + }, + ), + } + ) + + return config + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.airbyte_config.copy() + if source: + config.update(source) + + db_conf = env.cluster.airbyte_config["db"] + config.update( + { + "db": config.get( + "db", + { + "external": db_conf["external"], + "password": secrets.token_urlsafe(32), + "tls": db_conf["tls"], + "tls_enabled": db_conf["tls_enabled"], + 
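+                        # The remaining TLS flags mirror the cluster-level
+                        # defaults taken from env.cluster.airbyte_config["db"]
+                        # (db_conf above).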
"tls_disable_host_verification": db_conf[ + "tls_disable_host_verification" + ], + "host_verification": db_conf["host_verification"], + }, + ), + "logs": config.get( + "logs", + { + "backend": ( + env.cluster.airbyte_config["logs"]["backend"] + if env.cluster.airbyte_config["logs"]["external"] + else "minio" + ), + "external": env.cluster.airbyte_config["logs"]["external"], + "s3_log_bucket": "airbyte", + }, + ), + "cron": config.get( + "cron", + { + "enabled": True, + }, + ), + "resources": config.get( + "resources", + { + "webapp": { + "requests": {"cpu": "100m", "memory": "50Mi"}, + "limits": {"cpu": "500m", "memory": "250Mi"}, + }, + "pod-sweeper": { + "requests": {"cpu": "100m", "memory": "100Mi"}, + "limits": {"cpu": "250m", "memory": "300Mi"}, + }, + "server": { + "requests": {"cpu": "100m", "memory": "600Mi"}, + "limits": {"cpu": "1", "memory": "1.5Gi"}, + }, + "worker": { + "requests": {"cpu": "100m", "memory": "500Mi"}, + "limits": {"cpu": "1", "memory": "2.5Gi"}, + }, + "temporal": { + "requests": {"cpu": "100m", "memory": "150Mi"}, + "limits": {"cpu": "500m", "memory": "300Mi"}, + }, + "cron": { + "requests": {"cpu": "100m", "memory": "700Mi"}, + "limits": {"cpu": "1", "memory": "1Gi"}, + }, + "connector-builder-server": { + "requests": {"cpu": "100m", "memory": "600Mi"}, + "limits": {"cpu": "1", "memory": "1.5Gi"}, + }, + "api-server": { + "requests": {"cpu": "100m", "memory": "600Mi"}, + "limits": {"cpu": "1", "memory": "1.5Gi"}, + }, + "workload-launcher": { + "requests": {"cpu": "250m", "memory": "500Mi"}, + "limits": {"cpu": "500m", "memory": "1Gi"}, + }, + "connector-rollout-worker": { + "requests": {"cpu": "250m", "memory": "500Mi"}, + "limits": {"cpu": "500m", "memory": "1Gi"}, + }, + }, + ), + } + ) + + return config + + @classmethod + def get_unmet_preconditions(cls, env: Environment): + return ( + cls._chart_version_unmet_precondition(env) + + cls._external_db_config_unmet_preconditions(env.airbyte_config) + + cls._external_logs_config_unmet_preconditions(env.airbyte_config, env) + ) + + @classmethod + def _gen_airbyte_secrets(cls, env: Environment): + if env.airbyte_config["logs"]["backend"] == "minio": + auth_config = env.minio_config["auth"] + airbyte_logs_secrets = { + "access_key": auth_config["root_user"], + "secret_key": auth_config["root_password"], + } + else: + airbyte_logs_secrets = { + "access_key": env.airbyte_config["logs"]["access_key"], + "secret_key": env.airbyte_config["logs"]["secret_key"], + } + + return [ + make.hashed_secret( + name=cls.AIRBYTE_LOGS_SECRET_NAME, + data=airbyte_logs_secrets, + labels=cls._get_labels_adapter(), + ), + ] + + @classmethod + def _gen_airbyte_values( + cls, + env: Environment, + logs_secrets_name: str, + ): + probes_initial_delay = 90 if env.cluster.is_local else 180 + + values = { + "serviceAccount": {"create": False, "name": "airbyte-admin"}, + "version": env.release.airbyte_version, + "webapp": { + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "podLabels": cls._get_labels_adapter(), + }, + "cron": { + "enabled": env.airbyte_config["cron"]["enabled"], + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "podLabels": cls._get_labels_adapter(), + }, + "server": { + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "readinessProbe": {"initialDelaySeconds": probes_initial_delay}, + "livenessProbe": {"initialDelaySeconds": probes_initial_delay}, + "podLabels": cls._get_labels_adapter(), + }, + "worker": { + # https://github.com/airbytehq/airbyte/blob/master/docs/operator-guides/configuring-airbyte.md + "extraEnv": [ + { + "name": 
"JOB_KUBE_MAIN_CONTAINER_IMAGE_PULL_POLICY", + "value": "IfNotPresent", + }, + { + "name": "JOB_KUBE_NODE_SELECTORS", + "value": ",".join( + [ + f"{key}={val}" + for key, val in cls.WORKER_NODE_SELECTOR.items() + ] + ), + }, + # Removed as duplicated + # { + # "name": "JOB_KUBE_MAIN_CONTAINER_IMAGE_PULL_SECRET", + # "value": env.docker_config_secret_name, + # }, + # { + # "name": "JOB_KUBE_NAMESPACE", + # "value": env.k8s_namespace, + # }, + ], + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "podLabels": cls._get_labels_adapter(), + }, + "airbyte-bootloader": { + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "podLabels": cls._get_labels_adapter(), + }, + "temporal": { + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "podLabels": cls._get_labels_adapter(), + }, + "connector-builder-server": { + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "podLabels": cls._get_labels_adapter(), + }, + "global": {}, + } + + if cls._is_feature_enabled("pod-sweeper", env): + kubectl_repo, kubectl_tag = env.get_service_image( + "airbyte", "bitnami/kubectl" + ) + + values["pod-sweeper"] = { + "image": { + "repository": kubectl_repo, + "tag": kubectl_tag, + }, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "podLabels": cls._get_labels_adapter(), + } + + if cls._is_feature_enabled("airbyte-keycloak", env): + # Not interested in using keycloak for now + values["keycloak"] = {"enabled": False} + values["keycloak-setup"] = {"enabled": False} + + if not cls._is_feature_enabled("workload-api-server", env): + api_server_repo, api_server_tag = env.get_service_image( + "airbyte", "airbyte/airbyte-api-server" + ) + values["airbyte-api-server"] = { + "image": { + "repository": api_server_repo, + "tag": api_server_tag, + }, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "podLabels": cls._get_labels_adapter(), + } + + if cls._is_feature_enabled("global-image-registry", env): + values["global"]["image"] = {"registry": env.docker_registry} + else: + webapp_repo, webapp_tag = env.get_service_image("airbyte", "airbyte/webapp") + server_repo, server_tag = env.get_service_image("airbyte", "airbyte/server") + worker_repo, worker_tag = env.get_service_image( + "airbyte", + ( + "datacovesprivate/airbyte-worker" + if cls._is_feature_enabled("jobs-custom-registry", env) + else "airbyte/worker" + ), + ) + cron_repo, cron_tag = env.get_service_image("airbyte", "airbyte/cron") + bootloader_repo, bootloader_tag = env.get_service_image( + "airbyte", "airbyte/bootloader" + ) + temporal_repo, temporal_tag = env.get_service_image( + "airbyte", "temporalio/auto-setup" + ) + connector_builder_repo, connector_builder_tag = env.get_service_image( + "airbyte", "airbyte/connector-builder-server" + ) + values["webapp"]["image"] = { + "repository": webapp_repo, + "tag": webapp_tag, + } + values["server"]["image"] = { + "repository": server_repo, + "tag": server_tag, + } + values["worker"]["image"] = { + "repository": worker_repo, + "tag": worker_tag, + } + values["cron"]["image"] = { + "repository": cron_repo, + "tag": cron_tag, + } + values["airbyte-bootloader"]["image"] = { + "repository": bootloader_repo, + "tag": bootloader_tag, + } + values["temporal"]["image"] = { + "repository": temporal_repo, + "tag": temporal_tag, + } + values["connector-builder-server"]["image"] = { + "repository": connector_builder_repo, + "tag": connector_builder_tag, + } + + if cls._is_feature_enabled("workload-launcher", env): + values["workload-launcher"] = { + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "podLabels": cls._get_labels_adapter(), + } + if 
cls._is_feature_enabled("connector-rollout-worker", env): + values["connector-rollout-worker"] = { + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "podLabels": cls._get_labels_adapter(), + } + + if cls._is_feature_enabled("airbyte-url", env): + values["global"]["airbyteUrl"] = cls.get_public_url(env) + + if cls._is_feature_enabled("jobs-custom-registry", env): + values["global"]["jobs"] = { + "kube": { + "images": { + "busybox": ":".join( + env.get_service_image("airbyte", "busybox") + ), + "curl": ":".join( + env.get_service_image("airbyte", "curlimages/curl") + ), + "socat": ":".join( + env.get_service_image("airbyte", "alpine/socat") + ), + }, + "main_container_image_pull_secret": env.docker_config_secret_name, + } + } + values["worker"]["extraEnv"].append( + { + "name": "JOB_KUBE_MAIN_CONTAINER_IMAGE_REGISTRY", + "value": env.docker_registry, + } + ) + + # Database + if env.airbyte_config["db"].get("external", False): + values["postgresql"] = {"enabled": False} + db_config = env.airbyte_config["db"] + base_config = { + "host": db_config["host"], + "user": db_config["user"], + "password": db_config["password"], + "database": db_config["database"], + "port": db_config.get("port", 5432), + } + if cls._is_feature_enabled("internal-external-db", env): + values["global"]["database"] = base_config + if db_config["tls_enabled"]: + # we only set database type to external if TLS is enabled + # https://github.com/airbytehq/airbyte-platform/blob/6a291f688cec6ba498676325cfadfac3e48ece48/charts/airbyte-temporal/templates/deployment.yaml#L71 + values["global"]["database"]["type"] = "external" + else: + values["externalDatabase"] = base_config + + values["temporal"]["extraEnv"] = [ + { + "name": "SQL_TLS", + "value": str(db_config["tls"]).lower(), + }, + { + "name": "SQL_TLS_ENABLED", + "value": str(db_config["tls_enabled"]).lower(), + }, + { + "name": "POSTGRES_TLS_ENABLED", + "value": str(db_config["tls_enabled"]).lower(), + }, + { + "name": "SQL_TLS_DISABLE_HOST_VERIFICATION", + "value": str(db_config["tls_disable_host_verification"]).lower(), + }, + { + "name": "POSTGRES_TLS_DISABLE_HOST_VERIFICATION", + "value": str(db_config["tls_disable_host_verification"]).lower(), + }, + { + "name": "SQL_HOST_VERIFICATION", + "value": str(db_config["host_verification"]).lower(), + }, + # https://docs.temporal.io/blog/auto-setup/ + { + "name": "DBNAME", + "value": f"{env.slug}_airbyte_temporal", + }, + { + "name": "VISIBILITY_DBNAME", + "value": f"{env.slug}_airbyte_temporal_visibility", + }, + ] + else: + values["postgresql"] = { + "postgresqlPassword": env.airbyte_config["db"]["password"], + } + + # Logs + logs_config = env.airbyte_config["logs"] + if cls._is_feature_enabled("storage-refactor", env): + if logs_config.get("external", False): + values["global"]["storage"] = { + "type": "S3", + "bucket": { + "log": logs_config["s3_log_bucket"], + "state": logs_config["s3_log_bucket"], + "workloadOutput": logs_config["s3_log_bucket"], + }, + "s3": { + "region": env.airbyte_config["logs"]["s3_log_bucket_region"] + }, + "storageSecretName": logs_secrets_name, + } + else: + values["global"]["logs"] = { + "accessKey": { + "existingSecret": logs_secrets_name, + "existingSecretKey": "access_key", + }, + "secretKey": { + "existingSecret": logs_secrets_name, + "existingSecretKey": "secret_key", + }, + "minio": {"enabled": False}, + } + values["minio"] = {"enabled": False} + + if logs_config.get("external", False): + values["global"]["logs"]["s3"] = { + "enabled": True, + "bucket": logs_config["s3_log_bucket"], + 
"bucketRegion": logs_config["s3_log_bucket_region"], + } + values["global"]["state"] = {"storage": {"type": "S3"}} + values["global"]["logs"]["storage"] = {"type": "S3"} + else: + auth_config = env.minio_config["auth"] + values["global"]["logs"]["externalMinio"] = { + "enabled": True, + "host": MinioAdapter.deployment_name.format(env_slug=env.slug), + "port": 9000, + } + values["minio"] = { + "enabled": False, + "auth": { + "rootUser": auth_config["root_user"], + "rootPassword": auth_config["root_password"], + }, + } + + if env.cluster.defines_resource_requests: + resources_config = env.airbyte_config["resources"] + values["webapp"]["resources"] = resources_config["webapp"] + if cls._is_feature_enabled("pod-sweeper", env): + values["pod-sweeper"]["resources"] = resources_config["pod-sweeper"] + values["server"]["resources"] = resources_config["server"] + values["worker"]["resources"] = resources_config["worker"] + values["temporal"]["resources"] = resources_config["temporal"] + values["cron"]["resources"] = resources_config["cron"] + values["connector-builder-server"]["resources"] = resources_config[ + "connector-builder-server" + ] + if cls._is_feature_enabled("workload-api-server", env): + values["workload-api-server"]["resources"] = resources_config[ + "api-server" + ] + else: + values["airbyte-api-server"]["resources"] = resources_config[ + "api-server" + ] + + if cls._is_feature_enabled("workload-launcher", env): + values["workload-launcher"]["resources"] = resources_config[ + "workload-launcher" + ] + if cls._is_feature_enabled("connector-rollout-worker", env): + values["connector-rollout-worker"]["resources"] = resources_config[ + "connector-rollout-worker" + ] + + return deep_merge( + env.airbyte_config.get("override_values", {}), + deep_merge(env.cluster.airbyte_config.get("override_values", {}), values), + ) + + @classmethod + def get_internal_service_config(cls, env: Environment, name: str) -> dict: + if name == "minio": + # Airbyte provides its own minio instance + if not cls._is_feature_enabled("airbyte-minio", env): + config = env.airbyte_config["logs"] + if config["backend"] == "minio" and not config["external"]: + return {"buckets": [config["s3_log_bucket"]]} + return None + + @classmethod + def on_post_enabled(cls, env: Environment) -> dict: + config = {} + + if ( + env.cluster.has_dynamic_db_provisioning() + and env.airbyte_config["db"]["external"] + ): + read_only_db_user = cls._create_read_only_db_user(env=env) + if read_only_db_user: + config["db_read_only"] = read_only_db_user + + return config diff --git a/src/core/api/app/clusters/adapters/airflow.py b/src/core/api/app/clusters/adapters/airflow.py new file mode 100644 index 00000000..9d86f2df --- /dev/null +++ b/src/core/api/app/clusters/adapters/airflow.py @@ -0,0 +1,1759 @@ +import base64 +import json +import logging +import os +import secrets +from copy import deepcopy +from enum import Enum +from urllib.parse import urlparse + +import boto3 +import requests +from botocore.exceptions import ClientError +from clusters.request_utils import get_services_resources +from clusters.tasks import setup_airflow_roles +from credentials.models import Secret +from django.conf import settings +from django.contrib.auth.models import Permission +from django.utils import timezone +from django.utils.text import slugify +from integrations.models import Integration +from kubernetes.client.rest import ApiException as K8ApiException +from projects.models import Environment +from rest_framework.authtoken.models import Token +from 
users.models import User + +from lib.dicts import deep_merge, set_in +from lib.kubernetes import make + +from ..external_resources.efs import create_filesystem +from ..external_resources.postgres import create_database +from ..external_resources.s3 import create_bucket +from ..models import Cluster +from . import EnvironmentAdapter +from .minio import MinioAdapter +from .mixins.airflow_config import REPO_PATH, AirflowConfigMixin + +logger = logging.getLogger(__name__) + + +class DagsSource(Enum): + GIT = "git" + S3 = "s3" + + +class AirflowAdapter(EnvironmentAdapter, AirflowConfigMixin): + service_name = settings.SERVICE_AIRFLOW + deployment_name = "{env_slug}-airflow-webserver" + subdomain = "airflow-{env_slug}" + supported_integrations = [ + Integration.INTEGRATION_TYPE_SMTP, + Integration.INTEGRATION_TYPE_MSTEAMS, + Integration.INTEGRATION_TYPE_SLACK, + ] + chart_versions = ["1.7.0-dev", "1.13.1", "1.15.0"] + chart_features = { + # Prometheus statsd exporter + "prometheus_statsd_exporter": ">= 1.13.1", + # Security Manager V2 + "security_manager_v2": ">= 1.13.1", + # Standalone DagProcessor + "standalone_dag_processor": ">= 1.13.1", + # Cronjob to clean up empty folders + "cronjob_to_cleanup_full_logs": "<= 1.7.0-dev", + # Interval between git sync attempts in Go-style duration string + "gitSync.period": ">= 1.13.1", + } + + AIRFLOW_LOGS_PVC_NAME = "airflow-logs-pvc" + AIRFLOW_VALUES_CONFIG_MAP_NAME = "airflow-values" + AIRFLOW_GITSYNC_SECRET_NAME = "airflow-gitsync-secret" + AIRFLOW_S3SYNC_SECRET_NAME = "airflow-s3sync-secret" + AIRFLOW_ENV_SECRET_NAME = "airflow-env-secret" + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + """ + Generate k8s resources + """ + resources = [] + + webserver_secret = make.hashed_secret( + name="airflow-webserver-secret", + data=cls._gen_airflow_webserver_secret(env), + labels=cls._get_labels_adapter(), + ) + resources.append(webserver_secret) + + dags_source_secret_name = None + if env.airflow_config["dags_source"] == DagsSource.GIT.value: + dags_source_secret = make.hashed_secret( + name=cls.AIRFLOW_GITSYNC_SECRET_NAME, + data=cls._gen_airflow_git_sync_secret(env), + labels=cls._get_labels_adapter(), + ) + dags_source_secret_name = dags_source_secret["metadata"]["name"] + resources.append(dags_source_secret) + + elif env.airflow_config["dags_source"] == DagsSource.S3.value: + # If using access/secret keys, we need to create a secret for them + if env.airflow_config["s3_sync"].get("access_key"): + dags_source_secret = make.hashed_secret( + name=cls.AIRFLOW_S3SYNC_SECRET_NAME, + data=cls._gen_airflow_s3_sync_secret(env), + labels=cls._get_labels_adapter(), + ) + dags_source_secret_name = dags_source_secret["metadata"]["name"] + resources.append(dags_source_secret) + + extra_env_secret = make.hashed_secret( + name=cls.AIRFLOW_ENV_SECRET_NAME, + data=cls._gen_airflow_extra_env_secret(env), + labels=cls._get_labels_adapter(), + ) + resources.append(extra_env_secret) + + values = cls._gen_airflow_values( + env, + webserver_secret["metadata"]["name"], + extra_env_secret["metadata"]["name"], + dags_source_secret_name=dags_source_secret_name, + ) + + if env.airflow_config.get("db", {}).get("external", False): + resources.extend(cls._configure_external_metadata_db(env, values)) + + logs_backend_configure = { + Cluster.LOGS_BACKEND_S3: cls._configure_s3_logs, + Cluster.LOGS_BACKEND_EFS: cls._configure_efs_logs, # Amazon Elastic File System + Cluster.LOGS_BACKEND_AFS: cls._configure_afs_logs, # Azure Files + 
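+            # Each configurator mutates `values` in place and may also return
+            # extra K8s resources (e.g. a PVC for filesystem-backed logs) that
+            # are appended to the resource list below.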
Cluster.LOGS_BACKEND_NFS: cls._configure_afs_logs, # NFS local only + "loki": cls._configure_loki_logs, + "minio": cls._configure_s3_logs, + } + + log_backend = cls._get_log_backed(env=env) + if log_backend: + resources.extend( + logs_backend_configure[log_backend](env=env, values=values) + ) + + if cls._setup_second_log_handler(env=env): + resources.extend(logs_backend_configure["loki"](env=env, values=values)) + + values_config_map = make.hashed_json_config_map( + name=cls.AIRFLOW_VALUES_CONFIG_MAP_NAME, + data={"values.yaml": values}, + labels=cls._get_labels_adapter(), + ) + resources.append(values_config_map) + + if log_backend in ( + Cluster.LOGS_BACKEND_EFS, + Cluster.LOGS_BACKEND_AFS, + Cluster.LOGS_BACKEND_NFS, + ) and cls._is_feature_enabled("cronjob_to_cleanup_full_logs", env): + resources.append( + cls._gen_cronjob_to_cleanup_full_logs( + env=env, + pvc_logs_name=values["logs"]["persistence"]["existingClaim"], + ) + ) + + if env.cluster.is_feature_enabled(code="observability_stack"): + resources.extend(cls._gen_service_monitors(env=env)) + + return resources + + @classmethod + def sync_external_resources(cls, env: Environment): + cls._sync_external_dbs(env) + cls._sync_external_logs(env) + + @classmethod + def _sync_external_dbs(cls, env: Environment): + # https://airflow.apache.org/docs/helm-chart/stable/parameters-ref.html#database + if not env.airflow_config["db"].get("external", False): + db_data = { + "host": f"{env.slug}-airflow-postgresql.{env.k8s_namespace}", + "port": 5432, + "user": os.getenv("AIRFLOW_DB_DEFAULT_USER"), + "password": os.getenv("AIRFLOW_DB_DEFAULT_PASS"), + "database": "postgres", + } + + else: + if not env.cluster.has_dynamic_db_provisioning(): + return + + already_configured = ( + cls._external_db_config_unmet_preconditions(env.airflow_config) == [] + ) + if already_configured: + return + + db_data = create_database(env=env, db_name="airflow") + + env.airflow_config["db"].update(db_data) + Environment.objects.filter(id=env.id).update(airflow_config=env.airflow_config) + + @classmethod + def _sync_external_logs(cls, env: Environment): + if not env.airflow_config["logs"].get("external", False): + return + + already_configured = ( + cls._external_logs_config_unmet_preconditions(env.airflow_config, env) == [] + ) + if already_configured: + return + + log_backed = cls._get_log_backed(env) + logs_data = None + if ( + log_backed == Cluster.LOGS_BACKEND_S3 + and env.cluster.has_dynamic_blob_storage_provisioning() + ): + logs_data = create_bucket(env, "airflow") + + elif ( + log_backed == Cluster.LOGS_BACKEND_EFS + and env.cluster.provider == Cluster.EKS_PROVIDER + and env.cluster.has_dynamic_network_filesystem_provisioning() + ): + logs_data = create_filesystem(env) + + if logs_data: + env.airflow_config["logs"].update(logs_data) + Environment.objects.filter(id=env.id).update( + airflow_config=env.airflow_config + ) + + @classmethod + def get_default_values(cls, env=None) -> dict: + if env and env.type: + high_availability = env.type == env.TYPE_PROD + else: + high_availability = False + + logs = {} + logs_external = False + if env: + if env.cluster.airflow_config["logs"]["external"]: + logs_external = True + logs_be = env.cluster.airflow_config["logs"]["backend"] + elif env.cluster.is_local: + # This option is better than using minio + logs_be = Cluster.LOGS_BACKEND_NFS + else: + logs_be = "minio" + + logs = { + "backend": logs_be, + "external": logs_external, + "s3_log_bucket": "airflow", + "loki_enabled": False, + "loki_host": 
"http://loki-loki-distributed-gateway.prometheus.svc.cluster.local", + } + + return { + "dags_folder": "orchestrate/dags", + "dags_source": DagsSource.GIT.value, + "yaml_dags_folder": "orchestrate/dags_yml_definitions", + "git_branch": env.project.release_branch if env and env.project else "main", + "s3_sync": { + "wait": 60, + "path": "s3:///", + "access_key": "", + "secret_key": "", + "iam_role": "", + }, + "git_sync_wait": 60, + "git_sync_max_failures": 5, + "high_availability": high_availability, + "datacoves_dags_folder": False, + # https://medium.com/walmartglobaltech/cracking-the-code-boosting-airflow-efficiency-through-airflow-configuration-tuning-optimisation-47f602e7dd9a + "custom_envs": {}, + "logs": logs, + "db": { + "external": ( + env.cluster.airflow_config["db"]["external"] + if env and env.cluster + else False + ) + }, + "cookie_secret": str( + base64.standard_b64encode(secrets.token_bytes(32)), "ascii" + ), + "fernet_key": str( + base64.standard_b64encode(secrets.token_bytes(32)), "ascii" + ), + "cleanup_jobs": True, + "upload_manifest": False, + "dbt_api_url": settings.DBT_API_URL, + "upload_manifest_url": settings.DBT_API_UPLOAD_MANIFEST_URL, + "resources": { + "scheduler": { + "requests": {"cpu": "100m", "memory": "500Mi"}, + "limits": {"cpu": "1", "memory": "2Gi"}, + }, + "triggerer": { + "requests": {"cpu": "100m", "memory": "500Mi"}, + "limits": {"cpu": "1", "memory": "2Gi"}, + }, + "webserver": { + "requests": {"cpu": "100m", "memory": "1Gi"}, + "limits": {"cpu": "1", "memory": "4Gi"}, + }, + "workers": { + "requests": {"cpu": "200m", "memory": "500Mi"}, + "limits": {"cpu": "1", "memory": "4Gi"}, + }, + "statsd": { + "requests": {"cpu": "50m", "memory": "50Mi"}, + "limits": {"cpu": "200m", "memory": "200Mi"}, + }, + "cleanup": { + "requests": {"cpu": "50m", "memory": "100Mi"}, + "limits": {"cpu": "250m", "memory": "500Mi"}, + }, + "dagProcessor": { + "requests": {"cpu": "100m", "memory": "300Mi"}, + "limits": {"cpu": "1", "memory": "1Gi"}, + }, + "pgbouncer": { + "requests": {"cpu": "200m", "memory": "400Mi"}, + "limits": {"cpu": "700m", "memory": "800Mi"}, + }, + "git_sync": { + "requests": {"cpu": "100m", "memory": "250Mi"}, + "limits": {"cpu": "200m", "memory": "500Mi"}, + }, + "s3_sync": { + "requests": {"cpu": "100m", "memory": "250Mi"}, + "limits": {"cpu": "200m", "memory": "500Mi"}, + }, + "log_groomer": { + "requests": {"cpu": "100m", "memory": "128Mi"}, + "limits": {"cpu": "200m", "memory": "250Mi"}, + }, + }, + "purge_history_from_metadata_db": True, + } + + @classmethod + def get_cluster_default_config(cls, cluster: Cluster, source: dict = None) -> dict: + config = super().get_cluster_default_config(cluster=cluster, source=source) + config.update( + { + "logs": config.get( + "logs", {"external": False, "backend": Cluster.LOGS_BACKEND_S3} + ), + "db": config.get( + "db", + { + "external": False, + }, + ), + } + ) + + return config + + @classmethod + def _get_smtp_connection_id(cls, env): + return f"{env.slug}|smtp_default" + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = deepcopy(env.airflow_config.copy()) + if source: + config.update(source) + + oidc = config.get("oauth") + if not oidc: + oidc = cls.get_oidc_config(env, "/oauth-authorized/datacoves") + + sa_token = config.get("service_account_token") + sa_password = config.get("service_account_password") + if not sa_token or not sa_password: + sa_token, sa_password = cls.setup_service_account_token_password(env) + + resources = 
config.get("resources") + if not resources: + resources = get_services_resources(env) + + default_values = cls.get_default_values(env) + default_resources = default_values["resources"] + custom_envs = default_values["custom_envs"] + custom_envs.update(config.get("custom_envs", {})) + + # Logs + current_logs_config = config.get("logs") + if current_logs_config: + default_values["logs"].update(current_logs_config) + logs = default_values["logs"] + + config.update( + { + "dags_folder": config.get("dags_folder", default_values["dags_folder"]), + "dags_source": config.get("dags_source", default_values["dags_source"]), + "yaml_dags_folder": config.get( + "yaml_dags_folder", default_values["yaml_dags_folder"] + ), + "git_branch": config.get("git_branch", default_values["git_branch"]), + "s3_sync": config.get("s3_sync", default_values["s3_sync"]), + "git_sync_wait": config.get( + "git_sync_wait", default_values["git_sync_wait"] + ), + "git_sync_max_failures": config.get( + "git_sync_max_failures", default_values["git_sync_max_failures"] + ), + "high_availability": config.get( + "high_availability", default_values["high_availability"] + ), + "secrets_backend_enabled": config.get("secrets_backend_enabled", True), + "api_enabled": config.get( + "api_enabled", + # admin_secrets needs api_enabled. + env.cluster.is_feature_enabled("admin_secrets"), + ), + "custom_envs": custom_envs, + "oauth": oidc, + "logs": logs, + "db": config.get("db", default_values["db"]), + "pgbouncer": config.get( + "pgbouncer", + { + "enabled": True, + "maxClientConn": 100, + "metadataPoolSize": 10, + "resultBackendPoolSize": 5, + }, + ), + "cookie_secret": ( + config.get( + "cookie_secret", + str( + base64.standard_b64encode(secrets.token_bytes(32)), "ascii" + ), + ) + ), + "fernet_key": ( + config.get( + "fernet_key", + default_values["fernet_key"], + ) + ), + "cleanup_jobs": config.get( + "cleanup_jobs", default_values["cleanup_jobs"] + ), + "upload_manifest": config.get( + "upload_manifest", default_values["upload_manifest"] + ), + "upload_manifest_url": config.get( + "upload_manifest_url", + default_values["upload_manifest_url"], + ), + "service_account_token": str(sa_token), + "service_account_password": str(sa_password), + "resources": { + "scheduler": resources.get( + "scheduler", default_resources["scheduler"] + ), + "triggerer": resources.get( + "triggerer", default_resources["triggerer"] + ), + "webserver": resources.get( + "webserver", default_resources["webserver"] + ), + "workers": resources.get("workers", default_resources["workers"]), + "statsd": resources.get("statsd", default_resources["statsd"]), + "cleanup": resources.get("cleanup", default_resources["cleanup"]), + "dagProcessor": resources.get( + "dagProcessor", default_resources["dagProcessor"] + ), + "pgbouncer": resources.get( + "pgbouncer", default_resources["pgbouncer"] + ), + "git_sync": resources.get( + "git_sync", default_resources["git_sync"] + ), + "s3_sync": resources.get("s3_sync", default_resources["s3_sync"]), + "log_groomer": resources.get( + "log_groomer", default_resources["log_groomer"] + ), + }, + "purge_history_from_metadata_db": config.get( + "purge_history_from_metadata_db", + default_values["purge_history_from_metadata_db"], + ), + } + ) + + return config + + @classmethod + def setup_service_account_token_password(cls, env: Environment): + """Sets up the service account and returns a token and password as a + tuple. Separating this out because there are other cases where + we need to run the parent's setup_service_account uninhibited. 
+ """ + + sa_user = cls.setup_service_account(env) + + # Can be used for Airflow API + sa_password = secrets.token_urlsafe(12) + sa_user.set_password(sa_password) + sa_user.save() + + permission_name_template = settings.DBT_API_RESOURCES[0] + permission_name = permission_name_template.format( + cluster_domain=env.cluster.domain, env_slug=env.slug + ) + dbt_api_permission = Permission.objects.get(name=permission_name) + sa_user.user_permissions.add(dbt_api_permission) + + for perm in [ + f"{env.slug}|workbench:{settings.SERVICE_AIRFLOW}|{settings.ACTION_WRITE}", + f"{env.slug}|workbench:{settings.SERVICE_AIRFLOW}:admin|{settings.ACTION_WRITE}", + ]: + airflow_permission = Permission.objects.get(name__contains=perm) + if not sa_user.user_permissions.filter(name__contains=perm).exists(): + sa_user.user_permissions.add(airflow_permission) + + sa_token, _ = Token.objects.get_or_create(user=sa_user) + + return sa_token, sa_password + + @classmethod + def get_unmet_preconditions(cls, env: Environment): + unmets = ( + cls._chart_version_unmet_precondition(env) + + cls._external_db_config_unmet_preconditions(env.airflow_config) + + cls._external_logs_config_unmet_preconditions(env.airflow_config, env) + ) + if env.airflow_config["dags_source"] == DagsSource.GIT.value: + unmets += cls._git_clone_unmet_precondition(env) + elif env.airflow_config["dags_source"] == DagsSource.S3.value: + unmets += cls._s3_sync_unmet_preconditions(env) + else: + unmets += [ + { + "code": "invalid_dags_source_in_airflow_config", + "message": "Invalid 'dags_source' in airflow_config, 'git' or 's3' expected", + } + ] + + logs_backend = env.airflow_config["logs"].get("backend") + if ( + logs_backend + and logs_backend == "loki" + and not env.cluster.is_feature_enabled(code="observability_stack") + ): + unmets += [ + { + "code": "invalid_logs_in_airflow_config", + "message": "Observability stack must be enabled for Loki logs", + } + ] + + return unmets + + @classmethod + def _s3_sync_unmet_preconditions(cls, environment: Environment) -> list: + airflow_config = environment.airflow_config + s3_sync = airflow_config.get("s3_sync", {}) + s3_sync_copy = s3_sync.copy() + valid_credentials = s3_sync_copy.pop("valid_credentials", {}) + if not s3_sync.get("path"): + return [ + { + "code": "missing_path_in_s3_sync_config", + "message": "Missing 'path' in s3_sync config", + } + ] + if ( + not s3_sync.get("access_key") + and not s3_sync.get("secret_key") + and not s3_sync.get("iam_role") + ): + return [ + { + "code": "missing_credentials_in_s3_sync_config", + "message": "Missing 'access_key' and 'secret_key', or 'iam_role', in s3_sync config", + } + ] + else: + parsed_path = urlparse(s3_sync.get("path")) + bucket_name = parsed_path.netloc + bucket_path = parsed_path.path + if ( + s3_sync.get("access_key") + and s3_sync.get("secret_key") + and not s3_sync.get("iam_role") + ): + if valid_credentials and ( + valid_credentials.get("access_key") == s3_sync.get("access_key") + and valid_credentials.get("secret_key") == s3_sync.get("secret_key") + and valid_credentials.get("path") == s3_sync.get("path") + ): + return [] + try: + s3_client = boto3.client( + "s3", + aws_access_key_id=s3_sync.get("access_key"), + aws_secret_access_key=s3_sync.get("secret_key"), + ) + s3_client.list_objects(Bucket=bucket_name, Prefix=bucket_path) + except ClientError: + return [ + { + "code": "invalid_s3_sync_config_using_iam_user", + "message": "Unable to read S3 objects using IAM User", + } + ] + except Exception as exc: + return [ + { + "code": 
"invalid_s3_sync_config_using_iam_user", + "message": str(exc), + } + ] + s3_sync.update({"validated_at": str(timezone.now())}) + airflow_config["s3_sync"].update({"valid_credentials": s3_sync_copy}) + Environment.objects.filter(id=environment.id).update( + **{"airflow_config": airflow_config} + ) + return [] + + @classmethod + def _add_resources_requests(cls, env, values, cleanup_jobs): + """ + Adding resources requests/limits configuration + """ + components = [ + "webserver", + "scheduler", + "triggerer", + "workers", + "statsd", + "pgbouncer", + ] + if cls._is_feature_enabled("standalone_dag_processor", env): + components.append("dagProcessor") + + if cleanup_jobs: + components.append("cleanup") + + resources = env.airflow_config["resources"] + for component in components: + values[component]["resources"] = resources[component] + + values["scheduler"]["logGroomerSidecar"]["resources"] = resources.get( + "log_groomer", {} + ) + + if env.airflow_config["dags_source"] == DagsSource.GIT.value: + values["dags"]["gitSync"]["resources"] = resources.get("git_sync", {}) + + @classmethod + def _configure_datahub(cls, env: Environment, values): + datahub_enabled = ( + env.is_service_enabled(settings.SERVICE_DATAHUB) + and env.datahub_config.get("airflow_ingestion_enabled") + and env.airflow_config["secrets_backend_enabled"] + ) + set_in(values, ("config", "datahub", "enabled"), datahub_enabled) + + @classmethod + def _get_livenessprobe(cls, env: Environment, component: str) -> dict: + probe = ( + env.airflow_config.get("probes", {}).get(component, {}).get("liveness", {}) + ) + + data = { + "failureThreshold": probe.get("failureThreshold", 10), + "initialDelaySeconds": probe.get("initialDelaySeconds", 120), + "periodSeconds": probe.get("periodSeconds", 60), + "timeoutSeconds": probe.get("timeoutSeconds", 30), + } + + if probe.get("command"): + # Dummy command + # ["bash", "-c", "echo ok"] + data["command"] = probe.get("command") + + return data + + @classmethod + def _gen_airflow_values( + cls, + env: Environment, + webserver_secret_name, + extra_env_secret_name, + dags_source_secret_name=None, + ) -> dict: + version = env.release.airflow_version + airflow_repo, airflow_tag = env.get_image( + "datacovesprivate/airflow-airflow", True + ) + env_vars = cls.get_env_vars(env) + cleanup_jobs = env.airflow_config["cleanup_jobs"] + + values = { + "executor": "KubernetesExecutor", + "env": env_vars, + "fernetKey": env.airflow_config["fernet_key"], + "extraEnvFrom": f" - secretRef:\n name: '{extra_env_secret_name}'", + "webserverSecretKeySecretName": webserver_secret_name, + "defaultAirflowRepository": airflow_repo, + "defaultAirflowTag": airflow_tag, + "airflowVersion": version, + "registry": {"secretName": env.docker_config_secret_name}, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "webserver": { + "webserverConfig": cls._gen_airflow_webserver_config(env), + "livenessProbe": cls._get_livenessprobe(env, "webserver"), + "readinessProbe": { + "failureThreshold": 5, + "initialDelaySeconds": 60, + "periodSeconds": 30, + "timeoutSeconds": 30, + }, + "startupProbe": { + "failureThreshold": 20, + "periodSeconds": 30, + "timeoutSeconds": 30, + }, + }, + "workers": { + "nodeSelector": cls.WORKER_NODE_SELECTOR, + "containerLifecycleHooks": { + "preStop": { + "exec": {"command": ["/opt/datacoves/pre_stop_hook.sh"]} + } + }, + # Workers don't have a startupProbe. You CAN turn off + # livenessProbe if it turns out to be a problem though ... 
+ "livenessProbe": cls._get_livenessprobe(env, "workers"), + "extraVolumeMounts": [ + { + "mountPath": "/opt/airflow/pod_templates/pod_template_file.yaml", + "name": "config", + "readOnly": True, + "subPath": "pod_template_file.yaml", + }, + ], + }, + "scheduler": { + "logGroomerSidecar": { + "enabled": True, + "retentionDays": env.airflow_config.get("log_retention_days", 15), + }, + "livenessProbe": cls._get_livenessprobe(env, "scheduler"), + "startupProbe": { + "failureThreshold": 20, + "periodSeconds": 30, + "timeoutSeconds": 30, + }, + }, + "triggerer": { # Does not support startup probe for some reason + "logGroomerSidecar": {"enabled": False}, + "livenessProbe": cls._get_livenessprobe(env, "triggerer"), + }, + "labels": cls._get_labels_adapter(), + "pgbouncer": env.airflow_config["pgbouncer"], + "statsd": {}, + "config": {}, + } + + if cls._setup_second_log_handler(env=env): + values["config"]["logging"] = { + "logging_config_class": "log_config.LOGGING_CONFIG", + } + + if env.airflow_config["api_enabled"]: + values["config"]["api"] = { + "auth_backends": "airflow.auth.custom_api_auth,airflow.api.auth.backend.session" + } + + # Secret manager relies on Airflow API custom authentication + if env.airflow_config["secrets_backend_enabled"]: + values["config"]["secrets"] = { + "backend": "airflow.secrets.datacoves.DatacovesBackend", + } + + values["scheduler"]["containerLifecycleHooks"] = { + "postStart": { + "exec": { + "command": ["/opt/datacoves/post_start_hook.sh"], + } + } + } + + # If we're using node local DNS, we'll need a custom pod template + # file for airflow workers. + if env.cluster.is_feature_enabled("node_local_dns_enabled"): + with open( + "clusters/adapters/airflow/pod-template-file.kubernetes-helm-yaml", "r" + ) as file: + custom_pod_template = file.read() + + values["podTemplate"] = custom_pod_template + + cls._configure_datahub(env, values) + cls._configure_dags_source_values( + env, + env.airflow_config.get("dags_folder", ""), + values, + dags_source_secret_name, + ) + + if cleanup_jobs: + values["cleanup"] = { + "enabled": True, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + } + + if env.airflow_config["high_availability"]: + values["webserver"]["podDisruptionBudget"] = { + "enabled": True, + } + values["webserver"]["replicas"] = 2 + values["scheduler"] = values.get("scheduler", {}) + values["scheduler"]["podDisruptionBudget"] = { + "enabled": True, + } + values["scheduler"]["replicas"] = 2 + values["workers"]["podAnnotations"] = { + "cluster-autoscaler.kubernetes.io/safe-to-evict": "false" + } + + if env.cluster.defines_resource_requests: + cls._add_resources_requests(env, values, cleanup_jobs) + + # https://airflow.apache.org/docs/helm-chart/stable/parameters-ref.html#images + def h(image_and_tag): + return {"repository": image_and_tag[0], "tag": image_and_tag[1]} + + values["images"] = { + "airflow": {"pullPolicy": "IfNotPresent"}, + "statsd": h( + env.get_service_image("airflow", "quay.io/prometheus/statsd-exporter") + if cls._is_feature_enabled("prometheus_statsd_exporter", env) + else env.get_service_image( + "airflow", "apache/airflow", tag_prefix="airflow-statsd" + ) + ), + "pgbouncer": h( + env.get_service_image( + "airflow", "apache/airflow", tag_prefix="airflow-pgbouncer" + ) + ), + "pgbouncerExporter": h( + env.get_service_image( + "airflow", "apache/airflow", tag_prefix="airflow-pgbouncer-exporter" + ) + ), + } + + if env.airflow_config["dags_source"] == DagsSource.GIT.value: + values["images"]["gitSync"] = h( + env.get_service_image("core", 
"registry.k8s.io/git-sync/git-sync") + ) + # This is done to resolve cluster internal ip on local and private clusters + host_aliases = [ + { + "hostnames": [f"api.{env.cluster.domain}"], + "ip": env.cluster.internal_ip, + } + ] + + if env.cluster.is_local: + # Adding hostAliases to resolve datacoveslocal.com on each pod. + set_in(values, ("webserver", "hostAliases"), host_aliases) + set_in(values, ("scheduler", "hostAliases"), host_aliases) + set_in(values, ("workers", "hostAliases"), host_aliases) + # TODO: dagProcessor does not support hostAliases + # set_in(values, ("dagProcessor", "hostAliases"), host_aliases) + + cls._configure_smtp_integration_values(env, values) + return deep_merge( + env.airflow_config.get("override_values", {}), + deep_merge(env.cluster.airflow_config.get("override_values", {}), values), + ) + + @classmethod + def _configure_smtp_integration_values(cls, env, values): + """ + Adds needed environment variables to make smtp integration work accordingly + """ + smtp_integration = cls.get_enabled_integrations( + env, Integration.INTEGRATION_TYPE_SMTP + ).first() + if smtp_integration: + integration = smtp_integration.integration.settings + if integration.get("server") == "datacoves": + values["env"] += [ + { + "name": "AIRFLOW__SMTP__SMTP_HOST", + "value": settings.EMAIL_HOST, + }, + { + "name": "AIRFLOW__SMTP__SMTP_MAIL_FROM", + "value": settings.DEFAULT_FROM_EMAIL, + }, + { + "name": "AIRFLOW__SMTP__SMTP_PORT", + "value": str(settings.EMAIL_PORT), + }, + {"name": "AIRFLOW__SMTP__SMTP_STARTTLS", "value": "true"}, + {"name": "AIRFLOW__SMTP__SMTP_SSL", "value": "false"}, + ] + else: + values["env"] += [ + { + "name": "AIRFLOW__SMTP__SMTP_HOST", + "value": integration["host"], + }, + { + "name": "AIRFLOW__SMTP__SMTP_MAIL_FROM", + "value": integration["mail_from"], + }, + { + "name": "AIRFLOW__SMTP__SMTP_PORT", + "value": str(integration["port"]), + }, + ] + if "ssl" in integration: + values["env"] += [ + { + "name": "AIRFLOW__SMTP__SMTP_SSL", + "value": str(integration["ssl"]), + }, + ] + if "start_tls" in integration: + values["env"] += [ + { + "name": "AIRFLOW__SMTP__SMTP_STARTTLS", + "value": str(integration["start_tls"]), + }, + ] + + @classmethod + def _get_dags_git_envs(cls, env: Environment) -> dict: + _, git_sync_image_tag = env.release.get_image( + repo="registry.k8s.io/git-sync/git-sync" + ) + git_envs = [ + {"name": "GITSYNC_SSH_KNOWN_HOSTS", "value": "false"}, + {"name": "GIT_SYNC_KNOWN_HOSTS", "value": "false"}, + {"name": "GITSYNC_SUBMODULES", "value": "off"}, + {"name": "GITSYNC_SYNC_TIMEOUT", "value": "300s"}, + ] + + if git_sync_image_tag.startswith("v3"): + git_envs = [ + {"name": "GIT_KNOWN_HOSTS", "value": "false"}, + {"name": "GIT_SYNC_KNOWN_HOSTS", "value": "false"}, + {"name": "GIT_SYNC_SUBMODULES", "value": "off"}, + {"name": "GIT_SYNC_TIMEOUT", "value": "300"}, + ] + + return git_envs + + @classmethod + def _configure_dags_source_values( + cls, env, dags_folder, values, dags_source_secret_name=None + ): + """ + Configures git or s3 sync services as init/sidecar containers + """ + if cls._is_feature_enabled("standalone_dag_processor", env): + values["dagProcessor"] = { + "enabled": True, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "logGroomerSidecar": {"enabled": False}, + "livenessProbe": { + "failureThreshold": 5, + "initialDelaySeconds": 60, + "periodSeconds": 30, + "timeoutSeconds": 30, + }, + } + + if env.airflow_config["dags_source"] == DagsSource.GIT.value: + cls._configure_dags_source_values_for_git_sync( + env, values, dags_folder, 
dags_source_secret_name + ) + else: + cls._configure_dags_source_values_for_s3_sync( + env, values, dags_source_secret_name + ) + + @classmethod + def _configure_dags_source_values_for_git_sync( + cls, env, values, dags_folder, dags_source_secret_name=None + ): + git_sync_branch = env.airflow_config.get( + "git_branch", env.project.release_branch + ) + + # To avoid duplicates of "refs/heads/" in the branch name + git_sync_branch = git_sync_branch.replace("refs/heads/", "") + git_sync_wait = env.airflow_config["git_sync_wait"] + git_sync_max_failures = str(env.airflow_config["git_sync_max_failures"]) + + values["dags"] = { + "gitSync": { + "enabled": True, + "depth": 1, + "env": cls._get_dags_git_envs(env=env), + "branch": f"refs/heads/{git_sync_branch}", + "subPath": dags_folder, + "maxFailures": int(git_sync_max_failures), + } + } + + if cls._is_feature_enabled("gitSync.period", env): + values["dags"]["gitSync"]["period"] = f"{git_sync_wait}s" + else: + values["dags"]["gitSync"]["wait"] = git_sync_wait + + # If http clone strategy, set user and password + if env.project.clone_strategy == env.project.HTTP_CLONE_STRATEGY: + values["dags"]["gitSync"]["credentialsSecret"] = dags_source_secret_name + values["dags"]["gitSync"]["repo"] = env.project.repository.url + elif env.project.clone_strategy in ( + env.project.AZURE_SECRET_CLONE_STRATEGY, + env.project.AZURE_CERTIFICATE_CLONE_STRATEGY, + ): + values["dags"]["gitSync"]["env"].append( + { + "name": "GITSYNC_ASKPASS_URL", + "value": f"https://api.{env.cluster.domain}/api/v1/gitcallback/" + + str(env.project.uid), + } + ) + values["dags"]["gitSync"]["env"].append( + # v3 support, just in case + { + "name": "GIT_SYNC_ASKPASS_URL", + "value": f"https://api.{env.cluster.domain}/api/v1/gitcallback/" + + str(env.project.uid), + } + ) + values["dags"]["gitSync"]["repo"] = env.project.repository.url + else: + values["dags"]["gitSync"]["sshKeySecret"] = dags_source_secret_name + values["dags"]["gitSync"]["repo"] = env.project.repository.git_url + + @classmethod + def _configure_dags_source_values_for_s3_sync( + cls, env, values, dags_source_secret_name=None + ): + aws_repo, aws_tag = env.get_service_image("airflow", "amazon/aws-cli") + s3_path = env.airflow_config["s3_sync"]["path"] + s3_sync_wait = env.airflow_config["s3_sync"]["wait"] + s3_extra_params = env.airflow_config["s3_sync"].get("extra_params", "") + + # This is necessary so that when using s3 or git + # the same path is configured and the path is in the PYTHONPATH + values["dags"] = {"mountPath": str(cls._abs_repo_path(env))} + + # TODO: Remove when update container s3-sync works + if "--region" not in s3_extra_params: + try: + s3_urlparse = urlparse(s3_path) + r = requests.get(f"https://{s3_urlparse.netloc}.s3.amazonaws.com") + s3_region = r.headers["x-amz-bucket-region"] + if s3_region: + s3_extra_params = f"--region {s3_region} {s3_extra_params}" + + except Exception as err: + logger.error("Could not get S3 region for bucket %s: %s", s3_path, err) + + s3_sync_command = ( + f"aws s3 sync --exact-timestamps --delete --only-show-errors " + f"{s3_path} /dags {s3_extra_params}" + ) + + volume_mounts = [ + { + "name": "airflow-dags", + "mountPath": "/dags", + } + ] + extra_volumes = [ + { + "name": "airflow-dags", + "emptyDir": {}, + } + ] + if dags_source_secret_name: + # If secret was created containing aws credentials + volume_mounts.append( + { + "name": "aws-credentials", + "mountPath": "/root/.aws", + } + ) + extra_volumes.append( + { + "name": "aws-credentials", + "secret": { + 
"secretName": dags_source_secret_name, + "items": [{"key": "credentials", "path": "credentials"}], + "defaultMode": 0o644, + }, + } + ) + extra_volume_mounts = [ + { + "name": "airflow-dags", + "mountPath": REPO_PATH, + } + ] + s3_sync_sidecar = { + "extraContainers": [ + { + "name": "s3-sync", + "image": f"{aws_repo}:{aws_tag}", + "imagePullPolicy": "IfNotPresent", + "command": ["/bin/bash", "-c", "--"], + "args": [ + "touch /tmp/healthy aws_out.log; " + f"while true; echo running: {s3_sync_command}; " + f"{s3_sync_command} &> aws_out.log; cat aws_out.log; " + "if [ -s aws_out.log ]; then rm /tmp/healthy; fi; " + f"chown -R 50000:0 /dags; do sleep {s3_sync_wait}; done;" + ], + "volumeMounts": volume_mounts, + "securityContext": {"runAsUser": 0}, + "livenessProbe": { + "exec": {"command": ["cat", "/tmp/healthy"]}, + "initialDelaySeconds": 60, + "periodSeconds": 15, + "failureThreshold": 1, + }, + "resources": env.airflow_config.get("resources", {}).get( + "s3_sync", {} + ), + }, + ], + "extraVolumes": extra_volumes, + "extraVolumeMounts": extra_volume_mounts, + } + + if "dagProcessor" in values: + values["dagProcessor"].update(s3_sync_sidecar) + else: + values["scheduler"].update(s3_sync_sidecar) + + values["triggerer"].update(deepcopy(s3_sync_sidecar)) + + worker_values = { + "extraInitContainers": [ + { + "name": "s3-sync", + "image": f"{aws_repo}:{aws_tag}", + "imagePullPolicy": "IfNotPresent", + "command": ["/bin/bash", "-c", "--"], + "args": [ + f"aws s3 cp --recursive {s3_path} /dags {s3_extra_params}; " + f"chown -R 50000:0 /dags;" + ], + "volumeMounts": volume_mounts, + "securityContext": {"runAsUser": 0}, + } + ], + "extraVolumes": extra_volumes, + "extraVolumeMounts": extra_volume_mounts, + } + values["workers"].update(worker_values) + + if not dags_source_secret_name: + # When credentials are passed through a Service Account + iam_role_annot = { + "annotations": { + "eks.amazonaws.com/role-arn": env.airflow_config["s3_sync"][ + "iam_role" + ] + } + } + values["scheduler"]["serviceAccount"] = iam_role_annot + values["triggerer"]["serviceAccount"] = iam_role_annot + values["workers"]["serviceAccount"] = iam_role_annot + + @classmethod + def _gen_airflow_webserver_secret(cls, env: Environment): + return { + "webserver-secret-key": env.airflow_config["cookie_secret"], + } + + @classmethod + def _gen_airflow_git_sync_secret(cls, env: Environment): + if env.project.clone_strategy == env.project.HTTP_CLONE_STRATEGY: + creds = env.project.deploy_credentials + return { + # git-sync v3 + "GIT_SYNC_USERNAME": creds["git_username"], + "GIT_SYNC_PASSWORD": creds["git_password"], + # git-sync v4 + "GITSYNC_USERNAME": creds["git_username"], + "GITSYNC_PASSWORD": creds["git_password"], + } + elif env.project.clone_strategy == env.project.SSH_CLONE_STRATEGY: + return { + "gitSshKey": env.project.deploy_key.private, + } + else: + return {} + + @classmethod + def _gen_airflow_s3_sync_secret(cls, env: Environment): + access_key = env.airflow_config["s3_sync"]["access_key"] + secret_key = env.airflow_config["s3_sync"]["secret_key"] + return { + "credentials": "[default]\n" + f"aws_access_key_id = {access_key}\n" + f"aws_secret_access_key = {secret_key}" + } + + @classmethod + def _gen_airflow_extra_env_secret(cls, env: Environment): + credentials = env.service_credentials.select_related( + "connection_template" + ).filter(service="airflow", validated_at__isnull=False) + env_vars = {} + for credential in credentials: + connection = credential.combined_connection() + connection["type"] = 
credential.connection_template.type_slug + for key, value in connection.items(): + name = ( + slugify(f"DATACOVES__{credential.name}__{key}") + .replace("-", "_") + .upper() + ) + env_vars[name] = value + + smtp_integration = cls.get_enabled_integrations( + env, Integration.INTEGRATION_TYPE_SMTP + ).first() + if smtp_integration: + integration = smtp_integration.integration.settings + + if env.airflow_config["secrets_backend_enabled"]: + if integration.get("server") == "datacoves": + if ( + settings.EMAIL_BACKEND + == "django.core.mail.backends.smtp.EmailBackend" + ): + login = settings.EMAIL_HOST_USER + password = settings.EMAIL_HOST_PASSWORD + elif env.cluster.is_local: + # Local may not have a valid email configuration. + login = "none" + password = "none" + else: + import sentry_sdk + + sentry_sdk.capture_message( + "Unsupported Email Backend. This is probably " + "a configuration problem with SMTP on this " + "cluster. Environments will not sync until this " + "is corrected." + ) + + raise Exception( + "Unsupported Email Backend. Please review Django settings." + ) + else: + login = integration.get("user", "") + password = integration.get("password", "") + + Secret.objects.update_or_create( + slug=cls._get_smtp_connection_id(env), + project=env.project, + defaults={ + "value_format": Secret.VALUE_FORMAT_KEY_VALUE, + "sharing_scope": Secret.SHARED_ENVIRONMENT, + "environment": env, + "services": True, + "created_by": User.objects.get( + email=cls.get_service_account_email(env) + ), + "value": { + "conn_type": "Email", + "login": login, + "password": password, + }, + }, + ) + else: + if integration.get("server") == "datacoves": + env_vars["AIRFLOW_CONN_SMTP_DEFAULT"] = json.dumps( + { + "conn_type": "Email", + "login": settings.EMAIL_HOST_USER, + "password": settings.EMAIL_HOST_PASSWORD, + } + ) + else: + env_vars["AIRFLOW_CONN_SMTP_DEFAULT"] = json.dumps( + { + "conn_type": "Email", + "login": integration.get("user", ""), + "password": integration.get("password", ""), + } + ) + + msteams_integrations = cls.get_enabled_integrations( + env, Integration.INTEGRATION_TYPE_MSTEAMS + ) + + # NOTE: Regarding the notification integration variable; this is + # currently hard coded for My Airflow and the My Airflow version is + # set in the airflow_config mixin class. + # + # If, in the future, you're looking for where to change this for + # My Airflow, look there instead. 
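[Editor's note] The SMTP handling above and the MSTeams/Slack handling below share one pattern: each integration is exposed to Airflow as an AIRFLOW_CONN_<NAME> environment variable whose value is a JSON-serialized connection (recent Airflow versions accept a JSON body here). A small sketch of the naming and serialization step; a plain regex stands in for Django's slugify, and the integration name and webhook host are made up:

    import json
    import re

    def conn_env_var(integration_name: str, conn: dict):
        """Return the (env var name, value) pair for an Airflow connection.

        Approximates slugify(...).replace("-", "_").upper() from the code above.
        """
        slug = re.sub(r"[^a-z0-9]+", "-", integration_name.lower()).strip("-")
        env_name = "AIRFLOW_CONN_" + slug.replace("-", "_").upper()
        return env_name, json.dumps(conn)

    # Hypothetical integration named "Ops Teams Channel":
    name, value = conn_env_var(
        "Ops Teams Channel",
        {"conn_type": "http", "host": "example.webhook.office.com/webhookb2/abc", "schema": "https"},
    )
    # name == "AIRFLOW_CONN_OPS_TEAMS_CHANNEL"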
+ if msteams_integrations: + env_vars["DATACOVES__AIRFLOW_NOTIFICATION_INTEGRATION"] = "MSTEAMS" + for integration in msteams_integrations: + integration_settings = integration.integration.settings + parsed_url = urlparse(integration_settings["webhook_url"]) + conn_schema = parsed_url.scheme + conn_host = parsed_url.geturl().replace(f"{conn_schema}://", "", 1) + + env_vars[ + f"AIRFLOW_CONN_{slugify(integration.integration.name).replace('-', '_').upper()}" + ] = json.dumps( + { + "conn_type": "http", + "host": conn_host, + "schema": conn_schema, + "login": integration_settings.get("user", ""), + "password": integration_settings.get("password", ""), + } + ) + + slack_integrations = cls.get_enabled_integrations( + env, Integration.INTEGRATION_TYPE_SLACK + ) + if slack_integrations: + env_vars["DATACOVES__AIRFLOW_NOTIFICATION_INTEGRATION"] = "SLACK" + for integration in slack_integrations: + integration_settings = integration.integration.settings + parsed_url = urlparse(integration_settings["webhook_url"]) + env_vars[ + f"AIRFLOW_CONN_{slugify(integration.integration.name).replace('-', '_').upper()}" + ] = json.dumps( + { + "conn_type": "http", + "host": parsed_url.geturl(), + "password": integration_settings.get("api_key", ""), + } + ) + + return env_vars + + @classmethod + def _configure_external_metadata_db(cls, env: Environment, values): + metadata_db_secret = make.hashed_secret( + name="airflow-metadata-secret", + data=cls._gen_airflow_metadata_db_secret(env), + ) + + db_config = env.airflow_config["db"] + values["postgresql"] = {"enabled": False} + + # https://github.com/apache/airflow/blob/fc7d9835a70abaf834bc4ebc3472612d3a5574e4/chart/templates/_helpers.yaml#L494 + values["data"] = { + "metadataSecretName": metadata_db_secret["metadata"]["name"], + "metadataConnection": { + "user": db_config["user"], + "pass": db_config["password"], + "host": db_config["host"], + "db": db_config["database"], + "port": db_config.get("port", 5432), + "sslmode": db_config.get("sslmode", "disable"), + }, + } + + return [metadata_db_secret] + + @classmethod + def _gen_airflow_metadata_db_secret(cls, env: Environment): + # https://airflow.apache.org/docs/helm-chart/stable/parameters-ref.html#database + db_config = deepcopy(env.airflow_config["db"]) + if "connection" in db_config: + connection = db_config["connection"] + else: + if env.airflow_config.get("pgbouncer", {}).get("enabled", False): + db_config["host"] = f"{env.slug}-airflow-pgbouncer" + db_config["port"] = 6543 + db_config["database"] = f"{env.slug}-airflow-metadata" + + host = db_config["host"] + port = db_config.get("port", 5432) + user = db_config["user"] + password = db_config["password"] + database = db_config["database"] + connection = f"postgresql://{user}:{password}@{host}:{port}/{database}" + + return { + "connection": connection, + } + + @classmethod + def _configure_s3_logs(cls, env: Environment, values: dict): + connection_id = "LOGS_S3" + env_var_name = f"AIRFLOW_CONN_{connection_id}" + + if env.airflow_config["logs"].get("backend", "s3") == "minio": + secret = cls._gen_external_logs_secret_minio(env, env_var_name) + else: + secret = cls._gen_external_logs_secret_s3(env, env_var_name) + + values.setdefault("secret", []).append( + { + "envName": env_var_name, + "secretName": secret["metadata"]["name"], + "secretKey": env_var_name, # The name of the key + } + ) + bucket = env.airflow_config["logs"]["s3_log_bucket"] + remote_base_log_folder = f"s3://{bucket}" + set_in( + values, + ("config", "logging", "remote_base_log_folder"), + 
remote_base_log_folder, + ) + set_in(values, ("config", "logging", "remote_log_conn_id"), connection_id) + set_in(values, ("config", "logging", "remote_logging"), "true") + return [secret] + + @classmethod + def _configure_loki_logs(cls, env: Environment, values: dict): + connection_id = "LOGS_LOKI" + env_var_name = f"AIRFLOW_CONN_{connection_id}" + secret = cls._gen_external_logs_secret_loki(env, env_var_name) + values.setdefault("secret", []).append( + { + "envName": env_var_name, + "secretName": secret["metadata"]["name"], + "secretKey": env_var_name, # The name of the key + } + ) + + set_in(values, ("config", "logging", "remote_base_log_folder"), "loki") + set_in(values, ("config", "logging", "remote_log_conn_id"), connection_id) + return [secret] + + @classmethod + def _gen_external_logs_secret_s3(cls, env: Environment, env_var_name: str): + logs_config = env.airflow_config["logs"] + return make.hashed_secret( + name="airflow-logs-secret", + data={ + env_var_name: json.dumps( + { + "conn_type": "Amazon S3", + "extra": { + "aws_access_key_id": logs_config["access_key"], + "aws_secret_access_key": logs_config["secret_key"], + }, + } + ) + }, + ) + + @classmethod + def _gen_external_logs_secret_minio(cls, env: Environment, env_var_name: str): + auth_config = env.minio_config["auth"] + hostname = MinioAdapter.deployment_name.format(env_slug=env.slug) + return make.hashed_secret( + name="airflow-logs-secret", + data={ + env_var_name: json.dumps( + { + "conn_type": "Amazon S3", + "extra": { + "host": f"http://{hostname}:9000", + "aws_access_key_id": auth_config["root_user"], + "aws_secret_access_key": auth_config["root_password"], + }, + } + ) + }, + ) + + @classmethod + def _gen_external_logs_secret_loki(cls, env: Environment, env_var_name: str): + data = { + "conn_type": "loki", + "host": env.airflow_config["logs"]["loki_host"], + "port": env.airflow_config["logs"].get("loki_port", "80"), + } + + if env.airflow_config["logs"].get("loki_multi_tenancy", True): + data.update({"extra": {"X-Scope-OrgID": env.k8s_namespace}}) + + if env.airflow_config["logs"].get("loki_login"): + data.update( + { + "login": env.airflow_config["logs"]["loki_login"], + "password": env.airflow_config["logs"]["loki_password"], + } + ) + + return make.hashed_secret( + name="airflow-logs-secret", + data={env_var_name: json.dumps(data)}, + ) + + @classmethod + def _configure_efs_logs(cls, env: Environment, values: dict): + is_local = env.cluster.is_local + + logs_config = env.airflow_config["logs"] + logs_pvc_size = "1Gi" if is_local else "100Gi" + volume_handle = logs_config["volume_handle"] + keep_old_efs_volume = logs_config.get("keep_old_efs_volume", False) + pv_name = ( + f"airflow-logs-{env.slug}" + if keep_old_efs_volume + else f"airflow-logs-{env.slug}-{make.string_hash(volume_handle)}" + ) + efs_volume = make.efs_persistent_volume(pv_name, volume_handle, logs_pvc_size) + pvc_name = ( + cls.AIRFLOW_LOGS_PVC_NAME + if keep_old_efs_volume + else f"airflow-logs-{make.string_hash(pv_name)}" + ) + efs_storage_class = make.efs_storage_class() + efs_volume_claim = make.persistent_volume_claim( + pvc_name, + efs_storage_class["metadata"]["name"], + logs_pvc_size, + efs_volume["metadata"]["name"], + ) + + cls._configure_volume_logs(env, values, pvc_name) + res = [ + efs_volume, + efs_volume_claim, + ] + + if not is_local: + res.append(efs_storage_class) + + return res + + @classmethod + def _configure_afs_logs(cls, env: Environment, values: dict): + """Create a pvc to Azure Files""" + is_local = env.cluster.is_local + 
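[Editor's note] Both the S3 and Loki branches above write into the chart's [logging] options through set_in, which simply assigns a value at a nested key path. The real helper lives elsewhere in the repo; this is a minimal stand-in with the assumed semantics, showing the structure the S3 branch ends up producing (bucket name hypothetical):

    def set_in(target: dict, path: tuple, value):
        """Write value at the nested key path, creating intermediate dicts as needed."""
        node = target
        for key in path[:-1]:
            node = node.setdefault(key, {})
        node[path[-1]] = value

    values = {}
    set_in(values, ("config", "logging", "remote_base_log_folder"), "s3://my-log-bucket")
    set_in(values, ("config", "logging", "remote_log_conn_id"), "LOGS_S3")
    set_in(values, ("config", "logging", "remote_logging"), "true")
    # values == {"config": {"logging": {"remote_base_log_folder": "s3://my-log-bucket",
    #                                   "remote_log_conn_id": "LOGS_S3",
    #                                   "remote_logging": "true"}}}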
pvc_name = cls.AIRFLOW_LOGS_PVC_NAME + pvc_local = None + try: + env.cluster.kubectl.CoreV1Api.read_namespaced_persistent_volume_claim( + name=pvc_name, namespace=env.k8s_namespace + ) + except K8ApiException as e: + if e.status != 404: + raise e + + pvc_local = { + "kind": "PersistentVolumeClaim", + "apiVersion": "v1", + "metadata": {"name": pvc_name}, + "spec": { + "accessModes": ["ReadWriteMany"], + "resources": { + "requests": {"storage": "1Gi" if is_local else "20Gi"} + }, + "storageClassName": "azurefile-csi", + }, + } + + cls._configure_volume_logs(env, values, pvc_name) + return [pvc_local] if pvc_local else [] + + @classmethod + def _configure_nfs_logs(cls, env: Environment, values: dict): + """Create a pvc to NFS""" + pvc_name = cls.AIRFLOW_LOGS_PVC_NAME + pvc_local = None + try: + env.cluster.kubectl.CoreV1Api.read_namespaced_persistent_volume_claim( + name=pvc_name, namespace=env.k8s_namespace + ) + except K8ApiException as e: + if e.status != 404: + raise e + + pvc_local = { + "kind": "PersistentVolumeClaim", + "apiVersion": "v1", + "metadata": {"name": pvc_name}, + "spec": { + "accessModes": ["ReadWriteMany"], + "resources": {"requests": {"storage": "1Gi"}}, + "storageClassName": "nfs", + }, + } + + cls._configure_volume_logs(env, values, pvc_name) + return [pvc_local] if pvc_local else [] + + @classmethod + def _configure_volume_logs(cls, env: Environment, values: dict, pvc_name: str): + values["logs"] = { + "persistence": {"enabled": True, "existingClaim": pvc_name}, + } + + # Workaround based on https://stackoverflow.com/questions/63510335/ + # airflow-on-kubernetes-errno-13-permission-denied-opt-airflow-logs-schedule + if "scheduler" not in values: + values["scheduler"] = {} + + values["scheduler"]["extraInitContainers"] = [ + { + "name": "fix-volume-logs-permissions", + "image": ":".join(env.get_service_image("airflow", "busybox")), + # chown adjusted to cover just one level as it could take > 10 mins to run on EFS + "command": [ + "sh", + "-c", + "chown 50000:0 /opt/airflow/logs/ && chown 50000:0 /opt/airflow/logs/*", + ], + "securityContext": {"runAsUser": 0}, + "volumeMounts": [{"mountPath": "/opt/airflow/logs/", "name": "logs"}], + } + ] + + @classmethod + def get_internal_service_config(cls, env: Environment, name: str) -> dict: + if name == "minio": + config = env.airflow_config["logs"] + if config["backend"] == "minio" and not config["external"]: + return {"buckets": [config["s3_log_bucket"]]} + return None + + @classmethod + def get_writable_config(cls, env: Environment) -> dict: + return { + "dags_folder": env.airflow_config.get("dags_folder"), + "yaml_dags_folder": env.airflow_config.get("yaml_dags_folder"), + "dags_source": env.airflow_config.get("dags_source"), + "git_branch": env.airflow_config.get("git_branch"), + "s3_sync": env.airflow_config.get("s3_sync"), + "logs": env.airflow_config.get("logs"), + "resources": env.airflow_config.get("resources"), + "api_enabled": env.is_service_enabled("airflow") + and env.airflow_config.get( + "api_enabled", env.cluster.is_feature_enabled("admin_secrets") + ), + } + + @classmethod + def _gen_service_monitors(cls, env: Environment) -> list: + service_monitors = [] + + airflow_statd = { + "apiVersion": "monitoring.coreos.com/v1", + "kind": "ServiceMonitor", + "metadata": { + "name": cls.service_name, + "labels": {"app": "airflow-statsd", "release": "prometheus"}, + }, + "spec": { + "selector": { + "matchLabels": { + "component": "statsd", + "release": f"{env.slug}-airflow", + "tier": cls.service_name, + } + }, + 
"endpoints": [ + {"port": "statsd-scrape", "path": "/metrics", "interval": "15s"} + ], + }, + } + + service_monitors.append(airflow_statd) + + if env.airflow_config.get("pgbouncer", {}).get("enabled", False): + pgbouncer = { + "apiVersion": "monitoring.coreos.com/v1", + "kind": "ServiceMonitor", + "metadata": { + "name": "airflow-pgbouncer", + "labels": {"app": "airflow-pgbouncer", "release": "prometheus"}, + }, + "spec": { + "selector": { + "matchLabels": { + "component": "pgbouncer", + "release": f"{env.slug}-airflow", + "tier": cls.service_name, + } + }, + "endpoints": [ + { + "port": "pgbouncer-metrics", + "path": "/metrics", + "interval": "15s", + } + ], + }, + } + + service_monitors.append(pgbouncer) + + return service_monitors + + @classmethod + def _gen_cronjob_to_cleanup_full_logs( + cls, env: Environment, pvc_logs_name: str + ) -> dict: + """ + The cleanup of Airflow only deletes files. + This cron job deletes files and directories + FIXME: When update Airflow version https://github.com/apache/airflow/pull/33252 + """ + from lib.kubernetes.k8s_utils import gen_cron_job + + command = ["/bin/sh", "-c", "find /mnt/logs -type d -empty -delete || true"] + + image = ":".join(env.get_service_image(service="airflow", repo="busybox")) + + volumes = { + "volume_mounts": [{"mountPath": "/mnt/logs", "name": "logs"}], + "volumes": [ + {"name": "logs", "persistentVolumeClaim": {"claimName": pvc_logs_name}} + ], + } + + cron_job = gen_cron_job( + name=f"{env.slug}-airflow-cleanup-logs", + namespace=env.k8s_namespace, + schedule="0 * * * *", # every-1-hour + image=image, + command=command, + image_pull_secret=env.docker_config_secret_name, + volumes=volumes, + labels=cls._get_labels_adapter(), + ) + + return cron_job + + @classmethod + def on_post_enabled(cls, env: Environment) -> dict: + config = {} + setup_airflow_roles.apply_async((env.slug,), countdown=60) + cls._create_read_only_db_user(env=env, is_async=True) + return config diff --git a/src/core/api/app/clusters/adapters/airflow/pod-template-file.kubernetes-helm-yaml b/src/core/api/app/clusters/adapters/airflow/pod-template-file.kubernetes-helm-yaml new file mode 100644 index 00000000..897c5c79 --- /dev/null +++ b/src/core/api/app/clusters/adapters/airflow/pod-template-file.kubernetes-helm-yaml @@ -0,0 +1,269 @@ +{{/* + Licensed to the Apache Software Foundation (ASF) under one + or more contributor license agreements. See the NOTICE file + distributed with this work for additional information + regarding copyright ownership. The ASF licenses this file + to you under the Apache License, Version 2.0 (the + "License"); you may not use this file except in compliance + with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, + software distributed under the License is distributed on an + "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + KIND, either express or implied. See the License for the + specific language governing permissions and limitations + under the License. + + NOTE NOTE NOTE + + This file comes from here: + + https://github.com/apache/airflow/blob/main/chart/files/pod-template-file.kubernetes-helm-yaml + + Just in case we need to fetch a new version of it in the future. + Changes that I have made will be commented with 'NODE LOCAL' below. 
+*/}} +--- +{{- $nodeSelector := or .Values.workers.nodeSelector .Values.nodeSelector }} +{{- $affinity := or .Values.workers.affinity .Values.affinity }} +{{- $tolerations := or .Values.workers.tolerations .Values.tolerations }} +{{- $topologySpreadConstraints := or .Values.workers.topologySpreadConstraints .Values.topologySpreadConstraints }} +{{- $securityContext := include "airflowPodSecurityContext" (list . .Values.workers) }} +{{- $containerSecurityContextKerberosSidecar := include "containerSecurityContext" (list . .Values.workers.kerberosSidecar) }} +{{- $containerLifecycleHooksKerberosSidecar := or .Values.workers.kerberosSidecar.containerLifecycleHooks .Values.containerLifecycleHooks }} +{{- $containerSecurityContext := include "containerSecurityContext" (list . .Values.workers) }} +{{- $containerLifecycleHooks := or .Values.workers.containerLifecycleHooks .Values.containerLifecycleHooks }} +{{- $safeToEvict := dict "cluster-autoscaler.kubernetes.io/safe-to-evict" (.Values.workers.safeToEvict | toString) }} +{{- $podAnnotations := mergeOverwrite (deepCopy .Values.airflowPodAnnotations) $safeToEvict .Values.workers.podAnnotations }} +apiVersion: v1 +kind: Pod +metadata: + name: placeholder-name + labels: + tier: airflow + component: worker + release: {{ .Release.Name }} + {{- if or (.Values.labels) (.Values.workers.labels) }} + {{- mustMerge .Values.workers.labels .Values.labels | toYaml | nindent 4 }} + {{- end }} + annotations: + {{- toYaml $podAnnotations | nindent 4 }} + {{- if .Values.workers.kerberosInitContainer.enabled }} + checksum/kerberos-keytab: {{ include (print $.Template.BasePath "/secrets/kerberos-keytab-secret.yaml") . | sha256sum }} + {{- end }} +spec: + initContainers: + {{- if and .Values.dags.gitSync.enabled (not .Values.dags.persistence.enabled) }} + {{- include "git_sync_container" (dict "Values" .Values "is_init" "true" "Template" .Template) | nindent 4 }} + {{- end }} + {{- if .Values.workers.extraInitContainers }} + {{- tpl (toYaml .Values.workers.extraInitContainers) . | nindent 4 }} + {{- end }} + {{- if and (semverCompare ">=2.8.0" .Values.airflowVersion) .Values.workers.kerberosInitContainer.enabled }} + - name: kerberos-init + image: {{ template "airflow_image" . }} + imagePullPolicy: {{ .Values.images.airflow.pullPolicy }} + args: ["kerberos", "-o"] + resources: {{- toYaml .Values.workers.kerberosInitContainer.resources | nindent 8 }} + volumeMounts: + - name: logs + mountPath: {{ template "airflow_logs" . }} + {{- include "airflow_config_mount" . | nindent 8 }} + - name: config + mountPath: {{ .Values.kerberos.configPath | quote }} + subPath: krb5.conf + readOnly: true + - name: kerberos-keytab + subPath: "kerberos.keytab" + mountPath: {{ .Values.kerberos.keytabPath | quote }} + readOnly: true + - name: kerberos-ccache + mountPath: {{ .Values.kerberos.ccacheMountPath | quote }} + readOnly: false + {{- if .Values.volumeMounts }} + {{- toYaml .Values.volumeMounts | nindent 8 }} + {{- end }} + {{- if .Values.workers.extraVolumeMounts }} + {{- tpl (toYaml .Values.workers.extraVolumeMounts) . | nindent 8 }} + {{- end }} + {{- if or .Values.webserver.webserverConfig .Values.webserver.webserverConfigConfigMapName }} + {{- include "airflow_webserver_config_mount" . | nindent 8 }} + {{- end }} + envFrom: {{- include "custom_airflow_environment_from" . | default "\n []" | indent 6 }} + env: + - name: KRB5_CONFIG + value: {{ .Values.kerberos.configPath | quote }} + - name: KRB5CCNAME + value: {{ include "kerberos_ccache_path" . 
| quote }} + {{- include "custom_airflow_environment" . | indent 6 }} + {{- include "standard_airflow_environment" . | indent 6 }} + {{- end }} + containers: + - envFrom: {{- include "custom_airflow_environment_from" . | default "\n []" | indent 6 }} + env: + - name: AIRFLOW__CORE__EXECUTOR + value: LocalExecutor + {{- include "standard_airflow_environment" . | indent 6}} + {{- include "custom_airflow_environment" . | indent 6 }} + {{- include "container_extra_envs" (list . .Values.workers.env) | indent 6 }} + image: {{ template "pod_template_image" . }} + imagePullPolicy: {{ .Values.images.pod_template.pullPolicy }} + securityContext: {{ $containerSecurityContext | nindent 8 }} + {{- if $containerLifecycleHooks }} + lifecycle: {{- tpl (toYaml $containerLifecycleHooks) . | nindent 8 }} + {{- end }} + name: base + {{- if .Values.workers.command }} + command: {{ tpl (toYaml .Values.workers.command) . | nindent 8 }} + {{- end }} + resources: {{- toYaml .Values.workers.resources | nindent 8 }} + volumeMounts: + - mountPath: {{ template "airflow_logs" . }} + name: logs + {{- include "airflow_config_mount" . | nindent 8 }} + {{- if or .Values.dags.gitSync.enabled .Values.dags.persistence.enabled }} + {{- include "airflow_dags_mount" . | nindent 8 }} + {{- end }} + {{- if .Values.volumeMounts }} + {{- toYaml .Values.volumeMounts | nindent 8 }} + {{- end }} + {{- if .Values.workers.extraVolumeMounts }} + {{- tpl (toYaml .Values.workers.extraVolumeMounts) . | nindent 8 }} + {{- end }} + {{- if .Values.kerberos.enabled }} + - name: kerberos-keytab + subPath: "kerberos.keytab" + mountPath: {{ .Values.kerberos.keytabPath | quote }} + readOnly: true + - name: config + mountPath: {{ .Values.kerberos.configPath | quote }} + subPath: krb5.conf + readOnly: true + - name: kerberos-ccache + mountPath: {{ .Values.kerberos.ccacheMountPath | quote }} + readOnly: true + {{- end }} + {{- if .Values.workers.kerberosSidecar.enabled }} + - name: worker-kerberos + image: {{ template "airflow_image" . }} + imagePullPolicy: {{ .Values.images.airflow.pullPolicy }} + securityContext: {{ $containerSecurityContextKerberosSidecar | nindent 8 }} + {{- if $containerLifecycleHooksKerberosSidecar }} + lifecycle: {{- tpl (toYaml $containerLifecycleHooksKerberosSidecar) . | nindent 8 }} + {{- end }} + args: ["kerberos"] + resources: {{- toYaml .Values.workers.kerberosSidecar.resources | nindent 8 }} + volumeMounts: + - name: logs + mountPath: {{ template "airflow_logs" . }} + {{- include "airflow_config_mount" . | nindent 8 }} + - name: config + mountPath: {{ .Values.kerberos.configPath | quote }} + subPath: krb5.conf + readOnly: true + - name: kerberos-keytab + subPath: "kerberos.keytab" + mountPath: {{ .Values.kerberos.keytabPath | quote }} + readOnly: true + - name: kerberos-ccache + mountPath: {{ .Values.kerberos.ccacheMountPath | quote }} + readOnly: false + {{- if .Values.volumeMounts }} + {{- toYaml .Values.volumeMounts | nindent 8 }} + {{- end }} + {{- if .Values.workers.extraVolumeMounts }} + {{- tpl (toYaml .Values.workers.extraVolumeMounts) . | nindent 8 }} + {{- end }} + {{- if or .Values.webserver.webserverConfig .Values.webserver.webserverConfigConfigMapName }} + {{- include "airflow_webserver_config_mount" . | nindent 8 }} + {{- end }} + envFrom: {{- include "custom_airflow_environment_from" . | default "\n []" | indent 6 }} + env: + - name: KRB5_CONFIG + value: {{ .Values.kerberos.configPath | quote }} + - name: KRB5CCNAME + value: {{ include "kerberos_ccache_path" . 
| quote }} + {{- include "custom_airflow_environment" . | indent 6 }} + {{- include "standard_airflow_environment" . | indent 6 }} + {{- end }} + {{- if .Values.workers.extraContainers }} + {{- tpl (toYaml .Values.workers.extraContainers) . | nindent 4 }} + {{- end }} + {{- if .Values.workers.priorityClassName }} + priorityClassName: {{ .Values.workers.priorityClassName }} + {{- end }} + {{- if .Values.workers.runtimeClassName }} + runtimeClassName: {{ .Values.workers.runtimeClassName }} + {{- end }} + {{- if or .Values.registry.secretName .Values.registry.connection }} + imagePullSecrets: + - name: {{ template "registry_secret" . }} + {{- end }} + {{- if .Values.workers.hostAliases }} + hostAliases: {{- toYaml .Values.workers.hostAliases | nindent 4 }} + {{- end }} + restartPolicy: Never + securityContext: {{ $securityContext | nindent 4 }} + nodeSelector: {{- toYaml $nodeSelector | nindent 4 }} + affinity: {{- toYaml $affinity | nindent 4 }} + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.workers.terminationGracePeriodSeconds }} + tolerations: {{- toYaml $tolerations | nindent 4 }} + topologySpreadConstraints: {{- toYaml $topologySpreadConstraints | nindent 4 }} + serviceAccountName: {{ include "worker.serviceAccountName" . }} + {{/* NODE LOCAL Start */}} + dnsPolicy: None + dnsConfig: + nameservers: ["169.254.20.25","10.96.0.10"] + searches: + - core.svc.cluster.local + - svc.cluster.local + - cluster.local + options: + - name: ndots + value: "1" + - name: attempts + value: "5" + - name: timeout + value: "5" + {{/* NODE LOCAL End */}} + volumes: + {{- if .Values.dags.persistence.enabled }} + - name: dags + persistentVolumeClaim: + claimName: {{ template "airflow_dags_volume_claim" . }} + {{- else if .Values.dags.gitSync.enabled }} + - name: dags + emptyDir: {{- toYaml (default (dict) .Values.dags.gitSync.emptyDirConfig) | nindent 6 }} + {{- end }} + {{- if .Values.logs.persistence.enabled }} + - name: logs + persistentVolumeClaim: + claimName: {{ template "airflow_logs_volume_claim" . }} + {{- else }} + - emptyDir: {{- toYaml (default (dict) .Values.logs.emptyDirConfig) | nindent 6 }} + name: logs + {{- end }} + {{- if and .Values.dags.gitSync.enabled .Values.dags.gitSync.sshKeySecret }} + {{- include "git_sync_ssh_key_volume" . | nindent 2 }} + {{- end }} + - configMap: + name: {{ include "airflow_config" . }} + name: config + {{- if .Values.volumes }} + {{- toYaml .Values.volumes | nindent 2 }} + {{- end }} + {{- if .Values.kerberos.enabled }} + - name: kerberos-keytab + secret: + secretName: {{ include "kerberos_keytab_secret" . | quote }} + - name: kerberos-ccache + emptyDir: {} + {{- end }} + {{- if .Values.workers.extraVolumes }} + {{- tpl (toYaml .Values.workers.extraVolumes) . 
| nindent 2 }} + {{- end }} \ No newline at end of file diff --git a/src/core/api/app/clusters/adapters/airflow/security_manager.py b/src/core/api/app/clusters/adapters/airflow/security_manager.py new file mode 100644 index 00000000..a1277e37 --- /dev/null +++ b/src/core/api/app/clusters/adapters/airflow/security_manager.py @@ -0,0 +1,66 @@ +from typing import Any + +from jose import jwt +from requests import request + +from airflow.www.security import AirflowSecurityManager + + +class CustomSecurityManager(AirflowSecurityManager): + def request(self, url, method="GET", *args, **kwargs): + kwargs.setdefault("headers", {}) + response = request(method, url, *args, **kwargs) + response.raise_for_status() + return response + + def get_jwks(self, url, *args, **kwargs): + return self.request(url, *args, **kwargs).json() + + def get_oauth_user_info( + self, provider: str, resp: dict[str, Any] + ) -> dict[str, Any]: + id_token = resp["id_token"] + metadata = self.appbuilder.sm.oauth_remotes[provider].server_metadata + jwks = self.get_jwks(metadata["jwks_uri"]) + audience = self.appbuilder.sm.oauth_remotes[provider].client_id + payload = jwt.decode( + id_token, + jwks, + algorithms=["RS256"], + audience=audience, + issuer=metadata["issuer"], + access_token=resp["access_token"], + ) + name_parts = payload["name"].split(" ", 1) + first_name = name_parts[0] + last_name = name_parts[1] if len(name_parts) > 1 else "" + permissions = payload.get("permissions", []) + + # FORCE_ADMIN_ROLE is set by _gen_airflow_webserver_config + if FORCE_ADMIN_ROLE: # noqa + roles = ["Admin"] + else: + # Define roles based on permissions + role_mapping = { + "*|write": "Admin", # Admin role when any "*|write" permission is present + "security|write": "Admin", # Admin role when "security|write" is present + "admin|write": "Op", # Op role when "admin|write" is present + "sysadmin|write": "SysAdmin", # SysAdmin role when "sysadmin|read" is present + "dags|write": "User", # User role when "dags|write" is present + } + + # Check if any permission in the role_mapping should assign a role + roles = [role_mapping[perm] for perm in permissions if perm in role_mapping] + + # If no specific role is assigned, default to Viewer + if not roles: + roles = ["Viewer"] + + # Return the user info with the assigned roles + return { + "email": payload["email"], + "username": payload["email"], + "first_name": first_name, + "last_name": last_name, + "role_keys": roles, + } diff --git a/src/core/api/app/clusters/adapters/all.py b/src/core/api/app/clusters/adapters/all.py new file mode 100644 index 00000000..a23622bb --- /dev/null +++ b/src/core/api/app/clusters/adapters/all.py @@ -0,0 +1,49 @@ +from django.conf import settings + +from .airbyte import AirbyteAdapter +from .airflow import AirflowAdapter +from .code_server import CodeServerAdapter +from .datahub import DataHubAdapter +from .dbt_docs import DbtDocsAdapter +from .elastic import ElasticAdapter +from .grafana import GrafanaAdapter +from .kafka import KafkaAdapter +from .minio import MinioAdapter +from .neo4j import Neo4jAdapter +from .pomerium import PomeriumAdapter +from .postgresql import PostgreSQLAdapter +from .superset import SupersetAdapter + +INTERNAL_ADAPTERS = { + settings.INTERNAL_SERVICE_MINIO: MinioAdapter, + settings.INTERNAL_SERVICE_POMERIUM: PomeriumAdapter, + settings.INTERNAL_SERVICE_ELASTIC: ElasticAdapter, + settings.INTERNAL_SERVICE_NEO4J: Neo4jAdapter, + settings.INTERNAL_SERVICE_POSTGRESQL: PostgreSQLAdapter, + settings.INTERNAL_SERVICE_KAFKA: KafkaAdapter, + 
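[Editor's note] For reference, the permission-to-role mapping in get_oauth_user_info above behaves like this small extract (mapping copied from the code; the example permission strings are made up):

    role_mapping = {
        "*|write": "Admin",
        "security|write": "Admin",
        "admin|write": "Op",
        "sysadmin|write": "SysAdmin",
        "dags|write": "User",
    }

    def roles_for(permissions: list) -> list:
        """Map OIDC permission strings to Airflow role keys, defaulting to Viewer."""
        roles = [role_mapping[p] for p in permissions if p in role_mapping]
        return roles or ["Viewer"]

    assert roles_for(["dags|write"]) == ["User"]
    assert roles_for(["sysadmin|write", "dags|write"]) == ["SysAdmin", "User"]
    assert roles_for(["dashboards|read"]) == ["Viewer"]  # unmapped permissions fall back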
settings.INTERNAL_SERVICE_GRAFANA: GrafanaAdapter, +} + +EXTERNAL_ADAPTERS = { + settings.SERVICE_AIRBYTE: AirbyteAdapter, + settings.SERVICE_AIRFLOW: AirflowAdapter, + settings.SERVICE_DBT_DOCS: DbtDocsAdapter, + settings.SERVICE_SUPERSET: SupersetAdapter, + settings.SERVICE_CODE_SERVER: CodeServerAdapter, + settings.SERVICE_DATAHUB: DataHubAdapter, +} + +ADAPTERS = INTERNAL_ADAPTERS.copy() +ADAPTERS.update(EXTERNAL_ADAPTERS) + + +def get_supported_integrations(service_name): + return EXTERNAL_ADAPTERS[service_name].supported_integrations + + +def get_default_values() -> dict: + """Returns the list of default values for each adapter""" + default_values = {} + for name, adapter in EXTERNAL_ADAPTERS.items(): + default_values[name] = adapter.get_default_values() + return default_values diff --git a/src/core/api/app/clusters/adapters/code_server.py b/src/core/api/app/clusters/adapters/code_server.py new file mode 100644 index 00000000..4add1304 --- /dev/null +++ b/src/core/api/app/clusters/adapters/code_server.py @@ -0,0 +1,860 @@ +import copy +import json +import logging +from math import ceil + +import jinja2.exceptions +from clusters.adapters import EnvironmentAdapter +from clusters.models import Cluster +from codegen.models import Template +from codegen.templating import ( + build_environment_context, + build_user_context, + build_user_credentials_context, +) +from django.conf import settings +from django.db.models import Q +from projects.models.environment import Environment +from projects.models.repository import UserRepository +from projects.models.user_environment import UserEnvironment +from rest_framework.authtoken.models import Token +from users.models import User + +from lib.kubernetes import make +from lib.kubernetes.k8s_utils import ( + KubeUnitsMemory, + k8s_convert_to_cpu, + k8s_convert_to_mebibytes, + k8s_extract_numerical_value_and_units, + k8s_resources_combine, +) + +from .mixins.airflow_config import AirflowConfigMixin + +logger = logging.getLogger(__name__) + +REPO_PATH = "/config/workspace" + + +class CodeServerAdapter(EnvironmentAdapter, AirflowConfigMixin): + """WARNING: Be mindful that these methods may be called in a loop, + such as by workspace.sync. Thus, it is very easy to accidentally + create a situation where one of these methods creates a performance + issue. + + You can add to the select_related / prefetch_related fields in + workspace.SyncTask and use the is_relation_cached to use pre-fetched + data instead of running bespoke queries. + """ + + service_name = settings.SERVICE_CODE_SERVER + linked_service_names = [ + settings.SERVICE_LOCAL_DBT_DOCS, + settings.SERVICE_LOCAL_AIRFLOW, + ] + deployment_name = "code-server-{user_slug}" + over_provisioning_name = "overprovisioning" + over_provisioning_name_ns = "core" + code_server_resources_default = { + "requests": {"memory": "500Mi", "cpu": "100m"}, + "limits": {"memory": "3Gi", "cpu": "1"}, + } + + # TODO: This configuration + # (dbt_docs_resources_default and dbt_core_interface_resources_default) + # should be dynamic calculated based on the code server resources + dbt_docs_resources_default = { + "requests": {"memory": "50Mi", "cpu": "10m"}, + "limits": {"memory": "250Mi", "cpu": "100m"}, + } + dbt_core_interface_resources_default = { + "requests": {"memory": "250Mi", "cpu": "100m"}, + "limits": {"memory": "1Gi", "cpu": "1"}, + } + + # Based off resource settings for webserver/workers for airflow. 
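[Editor's note] These per-component defaults (including the local Airflow defaults that follow) are summed into the single reservation used by the overprovisioning pause pod in get_total_resources_for_code_server_image below. Rough stand-ins for the conversion helpers, only to make that arithmetic concrete; the real helpers in lib.kubernetes.k8s_utils handle more units and edge cases:

    _MEM_FACTORS = {"Mi": 1, "Gi": 1024}  # to mebibytes

    def to_mebibytes(quantity: str) -> int:
        for suffix, factor in _MEM_FACTORS.items():
            if quantity.endswith(suffix):
                return int(float(quantity[: -len(suffix)]) * factor)
        raise ValueError("unsupported memory quantity: " + quantity)

    def to_cpu(quantity: str) -> float:
        # "100m" means 0.1 CPU; a bare number means whole CPUs.
        return float(quantity[:-1]) / 1000 if quantity.endswith("m") else float(quantity)

    # Summing the code-server, dbt-docs and dbt-core-interface memory requests:
    total_request_mi = sum(to_mebibytes(q) for q in ("500Mi", "50Mi", "250Mi"))
    # total_request_mi == 800; to_cpu("100m") == 0.1, to_cpu("1") == 1.0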
+ local_airflow_resources_default = { + "requests": {"cpu": "50m", "memory": "250Mi"}, + "limits": {"cpu": "1", "memory": "3Gi"}, + } + + @classmethod + def get_cluster_default_config(cls, cluster: Cluster, source: dict = None) -> dict: + config = super().get_cluster_default_config(cluster=cluster, source=source) + max_code_server_pods_per_node = config.get( + "max_code_server_pods_per_node", + 16 if cluster.provider == Cluster.EKS_PROVIDER else 8, + ) + config.update( + { + "max_code_server_pods_per_node": max_code_server_pods_per_node, + "overprovisioning": config.get( + "overprovisioning", {"enabled": False, "replicas": 1} + ), + "resources": config.get( + "resources", + cls._calculate_code_server_resources( + cluster=cluster, + max_code_server_pods_per_node=max_code_server_pods_per_node, + ), + ), + } + ) + + return config + + @classmethod + def get_default_values(cls, env=None) -> dict: + cluster = env.cluster if env else Cluster.objects.current().first() + return { + "resources": cluster.code_server_config.get( + "resources", cls.code_server_resources_default + ) + } + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.code_server_config.copy() + if source: + config.update(source) + + config.update( + { + "resources": config.get( + "resources", cls.get_default_values(env)["resources"] + ), + } + ) + + return config + + @classmethod + def get_unmet_preconditions(cls, env: Environment): + unmet_preconditions = [] + + res = env.code_server_config["resources"] + res_cluster = env.cluster.code_server_config["resources"] + + # Valid memory request + mem_req, mem_req_units = k8s_extract_numerical_value_and_units( + res["requests"]["memory"] + ) + mem_req = k8s_convert_to_mebibytes(mem_req, KubeUnitsMemory(mem_req_units)) + + mem_default_req, mem_default_req_units = k8s_extract_numerical_value_and_units( + res_cluster["requests"]["memory"] + ) + mem_default_req = k8s_convert_to_mebibytes( + mem_default_req, KubeUnitsMemory(mem_default_req_units) + ) + + if mem_req < mem_default_req: + unmet_preconditions.append( + { + "code": "invalid_memory_request", + "message": f"The memory request must be greater than or equal to {mem_default_req}Mi.", + } + ) + + # Valid cpu request + cpu_req, cpu_req_units = k8s_extract_numerical_value_and_units( + res["requests"]["cpu"] + ) + cpu_req = k8s_convert_to_cpu(cpu_req, cpu_req_units == "m") + + cpu_default_req, cpu_default_req_units = k8s_extract_numerical_value_and_units( + res_cluster["requests"]["cpu"] + ) + cpu_default_req = k8s_convert_to_cpu( + cpu_default_req, cpu_default_req_units == "m" + ) + + if cpu_req < cpu_default_req: + unmet_preconditions.append( + { + "code": "invalid_cpu_request", + "message": f"The cpu request must be greater than or equal to {cpu_default_req}.", + } + ) + + return unmet_preconditions + + @classmethod + def get_user_unmet_preconditions(cls, ue: UserEnvironment): + """Returns a list of preconditions that where not met.""" + + unmet_preconditions = [] + + if not ue.user.is_repository_tested( + repository=ue.environment.project.repository + ): + unmet_preconditions.append( + { + "code": "invalid_repository_tested", + "message": "You must test git repository before using the service.", + } + ) + + return unmet_preconditions + + @classmethod + def get_user_unmet_preconditions_bulk(cls, ue_list) -> dict: + """Does get_user_unmet_preconditions, except optimized for bulk + results. 
+ + For this to be optimally efficient, you should select_related 'user' + 'environment__project__', and 'environment__project__repository' + when building the ue_list queryset. + """ + + # ue.user.is_repository_tested will do an individual query for + # each user environment and there isn't an easy way to avoid this. + # + # As such, this call will do what is_repository_tested does under + # the hood, except it will do it in a bulk fashion. + + users = [ue.user.id for ue in ue_list] + + # Map user ID to dictionary mapping UserRepository.repository_id + # to UserRepository object + user_repositories = {} + + for ur in UserRepository.objects.filter(user_id__in=users): + if ur.user_id not in user_repositories: + user_repositories[ur.user_id] = {ur.repository_id: ur} + else: + user_repositories[ur.user_id][ur.repository_id] = ur + + # Map UserEnvironment ID's to unmet precondition lists + unmet_preconditions = {} + + # Figure out missing preconditions + for ue in ue_list: + unmet_preconditions[ue.id] = [] + repo_id = ue.environment.project.repository_id + if ( + ue.user_id not in user_repositories + or repo_id not in user_repositories[ue.user_id] + or user_repositories[ue.user_id][repo_id].validated_at is None + ): + unmet_preconditions[ue.id].append( + { + "code": "invalid_repository_tested", + "message": "You must test git repository before using the service.", + } + ) + + return unmet_preconditions + + @classmethod + def get_user_linked_services_unmet_preconditions( + cls, service_name: str, ue: UserEnvironment + ): + if service_name != "local-airflow" or ue.code_server_local_airflow_active: + return [] + else: + return [ + { + "code": "local_airflow_inactive", + "message": "Local airflow has not been turned on for this user.", + } + ] + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + """Returns the list of resources to be created on kubernetes""" + + res = [] + if env.cluster.code_server_config["overprovisioning"]["enabled"]: + pause_deployment = cls._gen_pause_deployment(env) + if pause_deployment: + res.append(cls._gen_priority_class()) + res.append(pause_deployment) + + return res + + @classmethod + def get_writable_config(cls, env: Environment) -> dict: + return { + "resources": env.code_server_config.get("resources"), + } + + @classmethod + def _gen_priority_class(cls) -> dict: + labels = {"app": "overprovisioning"} + labels.update(cls._get_labels_adapter()) + + return { + "apiVersion": "scheduling.k8s.io/v1", + "kind": "PriorityClass", + "metadata": { + "name": cls.over_provisioning_name, + "namespace": cls.over_provisioning_name_ns, + "labels": labels, + }, + "value": -10, + "globalDefault": False, + } + + @classmethod + def get_total_resources_for_code_server_image(cls, cluster: Cluster): + memory_requests = 0 + memory_limits = 0 + cpu_requests = 0 + cpu_limits = 0 + for item in [ + cluster.code_server_config["resources"], + cls.dbt_docs_resources_default, + cls.dbt_core_interface_resources_default, + cls.local_airflow_resources_default, + ]: + mem_req, mem_req_units = k8s_extract_numerical_value_and_units( + item["requests"]["memory"] + ) + men_lim, men_lim_units = k8s_extract_numerical_value_and_units( + item["limits"]["memory"] + ) + cpu_req, cpu_req_units = k8s_extract_numerical_value_and_units( + item["requests"]["cpu"] + ) + cpu_lim, cpu_lim_units = k8s_extract_numerical_value_and_units( + item["limits"]["cpu"] + ) + + memory_requests += k8s_convert_to_mebibytes( + mem_req, KubeUnitsMemory(mem_req_units) + ) + memory_limits += 
k8s_convert_to_mebibytes( + men_lim, KubeUnitsMemory(men_lim_units) + ) + cpu_requests += k8s_convert_to_cpu(cpu_req, cpu_req_units == "m") + cpu_limits += k8s_convert_to_cpu(cpu_lim, cpu_lim_units == "m") + + return { + "requests": { + "memory": f"{memory_requests}Mi", + "cpu": format(cpu_requests, ".2f"), + }, + "limits": { + "memory": f"{memory_limits}Mi", + "cpu": format(cpu_limits, ".2f"), + }, + } + + @classmethod + def _gen_pause_deployment(cls, env: Environment) -> dict: + try: + labels = {"app": "overprovisioning"} + labels.update(cls._get_labels_adapter()) + pause_image = ":".join( + env.cluster.get_service_image("core", "registry.k8s.io/pause") + ) + deployment = { + "apiVersion": "apps/v1", + "kind": "Deployment", + "metadata": { + "name": cls.over_provisioning_name, + "namespace": cls.over_provisioning_name_ns, + "labels": labels, + }, + "spec": { + "replicas": env.cluster.code_server_config["overprovisioning"][ + "replicas" + ], + "selector": {"matchLabels": {"app": "overprovisioning"}}, + "template": { + "metadata": {"labels": {"app": "overprovisioning"}}, + "spec": { + "priorityClassName": cls.over_provisioning_name, + "terminationGracePeriodSeconds": 0, + "nodeSelector": cls.VOLUMED_NODE_SELECTOR, + "imagePullSecrets": [ + {"name": env.cluster.docker_config_secret_name} + ], + "initContainers": cls._get_init_containers( + cluster=env.cluster + ), + "containers": [ + { + "name": "pause", + "resources": cls.get_total_resources_for_code_server_image( + env.cluster + ), + "image": pause_image, + } + ], + }, + }, + }, + } + + return deployment + except KeyError: + logger.error( + "Pause pods did not find the image registry.k8s.io/pause on release %s", + env.cluster.release, + ) + return None + + @classmethod + def _calculate_code_server_resources( + cls, cluster: Cluster, max_code_server_pods_per_node: int + ) -> dict: + """ + Calculate and return the pod resources based on node configuration and provider settings. 
+ """ + + custom_resources = copy.deepcopy(cls.code_server_resources_default) + if cluster.defines_resource_requests: + memory_request, memory_limit = cls._calculate_node_code_server_memory( + cluster, max_code_server_pods_per_node + ) + custom_resources["requests"]["memory"] = f"{memory_request}Mi" + custom_resources["limits"]["memory"] = f"{memory_limit}Mi" + + # configure major resources + custom_resources = k8s_resources_combine( + cls.code_server_resources_default, custom_resources + ) + + return custom_resources + + @classmethod + def _calculate_node_code_server_memory( + cls, cluster: Cluster, max_code_server_pods_per_node: int + ) -> tuple[int, int]: + kubectl = cluster.kubectl + nodes = kubectl.get_nodes_by_selector(selector=cls.VOLUMED_NODE_SELECTOR) + node = nodes.items.pop() + node_memory, units = k8s_extract_numerical_value_and_units( + node.status.capacity["memory"] + ) + memory_per_pod = ceil(node_memory / max_code_server_pods_per_node) + memory_request = k8s_convert_to_mebibytes( + memory_per_pod, KubeUnitsMemory(units) + ) + return memory_request, memory_request + + @classmethod + def _get_init_containers(cls, cluster: Cluster) -> list: + cache_images = [] + for env in cluster.environments.select_related("release").filter( + type=Environment.TYPE_DEV + ): + if env.is_service_enabled(service_name=cls.service_name): + images_released = [ + f"{image}:{tag}" for image, tag in env.release.images.items() + ] + + # Always add observe-local-dbt-docs image + cache_images.extend( + [ + image + for image in images_released + if "observe-local-dbt-docs" in image + ] + ) + + # if the environment is using a custom profile image set, take the images from it + image_set = env.profile.image_set + if image_set and image_set.images: + logger.info( + "caching images to env %s and profile image set %s", + env.slug, + image_set.profile, + ) + + images_released = [ + f"{image}:{tag}" for image, tag in image_set.images.items() + ] + cache_images.extend( + [image for image in images_released if "code-server" in image] + ) + + else: + logger.info( + "caching images from env %s and release %s", + env.slug, + env.release, + ) + cache_images.extend( + [ + image + for image in images_released + if f"code-server-code-server-{env.release_profile}" in image + or f"code-server-dbt-core-interface-{env.release_profile}" + in image + ] + ) + + images = [] + for image in sorted(set(cache_images)): + if cluster.docker_registry and not image.startswith( + cluster.docker_registry + ): + image = f"{cluster.docker_registry}/{image}" + + images.append( + { + "name": f"overprovisioning-image-{len(images) + 1}", + "image": image, + "command": [ + "sh", + "-c", + f"echo Datacoves overprovisioning {image} done.", + ], + } + ) + + return images + + @classmethod + def get_public_url(cls, env: Environment, user: User): + """This is used by get_oidc_config to generate the URL for local + airflow.""" + + return f"https://{user.slug}-airflow-{env.slug}.{env.cluster.domain}" + + @classmethod + def gen_user_secrets( # noqa: C901 + cls, env: Environment, user: User, ue: UserEnvironment + ): + """Generates secrets for user and environment. 
Keep in mind that this is currently generating + ssl and ssh keys only for code-server""" + + code_server_ssl_keys = cls._gen_user_ssl_keys(env, user) + ssh_keys = cls._gen_user_ssh_keys(env, user) + files = cls._gen_user_profile_files(env, user) + + # Fetch our token if we don't already have it + if ( + cls.is_relation_cached(user, "auth_token") + and hasattr(user, "auth_token") + and user.auth_token is not None + ): + token = user.auth_token.key + else: + # SDC: This is less than optimal, because this particular + # method will be called in a loop and thus if the user doesn't + # have a key, we will do this get_or_create in a loop for + # that given user because it doesn't update our cached 'user' + # model object. + # + # We could reload 'user' from the database to update it, but + # I think that might delete associated cached items and thus + # cause larger problems. + # + # I believe this is (probably) a minority case, however, most + # users should have a token. So chances are, this query loop + # will happen exactly once per user, which I think is more + # acceptable than spending time trying to optimize this edge + # case. + token = Token.objects.get_or_create(user=user)[0].key + + ### WARNING WARNING WARNING + ### + ### To add an environment variable that is visible to the code + ### server USER, it must be prefixed with DC_CUSTOM__ + ### + ### Otherwise, the variable will only be visible to code server's + ### root user which is usually not as helpful but is useful for + ### items that are actually secrets. + + # TODO: Split secret into two different secrets: one for env vars and another for file mounts + data = { + "DATACOVES__SECRETS_TOKEN": token, + "DATACOVES__API_TOKEN": token, + "DATACOVES__SECRETS_URL": f"https://api.{env.cluster.domain}", + "DATACOVES__SECRETS_PROJECT": env.project.slug, + "DATACOVES__SSL_KEYS_JSON": json.dumps(code_server_ssl_keys), + "DATACOVES__SSH_KEYS_JSON": json.dumps(ssh_keys), + "DATACOVES__PROFILE_FILES": json.dumps(files), + "DC_CUSTOM__DATACOVES__ENVIRONMENT_SLUG": env.slug, + "DC_CUSTOM__DATACOVES__API_ENDPOINT": "http://core-dbt-api-svc.core.svc.cluster.local", + "DC_CUSTOM__DATACOVES__PROJECT_SLUG": env.project.slug, + } + + if ue.code_server_local_airflow_active: + if ( + not ue.local_airflow_config + or not ue.local_airflow_config.get("webserver") + or not ue.local_airflow_config["webserver"].get("oidc") + ): + # This should happen exactly once for each user that + # enables local airflow, which will be a small subset of + # users. So even though this is a query in a loop, I + # think it is both acceptable and unavoidable. + if not ue.local_airflow_config: + # Make sure this is a dictionary. + ue.local_airflow_config = {} + + if "webserver" not in ue.local_airflow_config: + ue.local_airflow_config["webserver"] = {} + + ue.local_airflow_config["webserver"] = { + "oidc": cls.get_oidc_config( + env, + "/oauth-authorized/datacoves", + f"{user.slug}-local-airflow", + user, + ) + } + + # Avoid triggers and such. 
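+ # QuerySet.update() issues a single SQL UPDATE and bypasses Model.save(),
+ # pre_save/post_save signals and auto_now fields, so persisting the
+ # generated OIDC config here cannot re-fire any save handlers attached to
+ # UserEnvironment.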
+ UserEnvironment.objects.filter(id=ue.id).update( + local_airflow_config=ue.local_airflow_config + ) + + data[ + "DATACOVES__AIRFLOW_WEBSERVER_CONFIG" + ] = cls._gen_airflow_webserver_config( + env, + ue.local_airflow_config["webserver"]["oidc"], + "Admin", + ) + + # Custom env variables + # If you change this prefix you must change it in the code server run file + env_prefix = "DC_CUSTOM__" + + # Project variables + if env.project.variables: + for key, value in env.project.variables.items(): + data[f"{env_prefix}{key}"] = value + + # Environment variables + if env.variables: + for key, value in env.variables.items(): + data[f"{env_prefix}{key}"] = value + + # UserEnvironment variables + if ue and ue.variables: + for key, value in ue.variables.items(): + data[f"{env_prefix}{key}"] = value + + if env.is_service_enabled_and_valid(settings.SERVICE_AIRBYTE): + data[ + "DATACOVES__AIRBYTE_HOST_NAME" + ] = f"http://{env.slug}-airbyte-airbyte-server-svc" + data["DATACOVES__AIRBYTE_PORT"] = 8001 + + dbt_log_level_config = "warn" + if env.is_service_enabled_and_valid(settings.SERVICE_AIRFLOW): + data["DATACOVES__AIRFLOW_DAGS_YML_PATH"] = env.airflow_config.get( + "yaml_dags_folder" + ) + data["DATACOVES__AIRFLOW_DAGS_PATH"] = env.airflow_config.get("dags_folder") + dbt_log_level_config = env.airflow_config.get( + "dbt_log_level", dbt_log_level_config + ) + data["DATACOVES__AIRFLOW_DBT_PROFILE_PATH"] = env.dbt_profiles_dir + + data["LOG_LEVEL"] = dbt_log_level_config + data["DATACOVES__REPO_PATH"] = REPO_PATH + data["DATACOVES__DBT_ADAPTER"] = env.release_profile.replace("dbt-", "") + dbt_home = f"{REPO_PATH}/{env.dbt_home_path}" + data["DATACOVES__DBT_HOME"] = dbt_home + if env.is_service_enabled_and_valid(settings.SERVICE_DATAHUB): + data[ + "DC_CUSTOM__DATACOVES__DATAHUB_HOST_NAME" + ] = f"http://{env.slug}-datahub-datahub-gms" + data["DC_CUSTOM__DATACOVES__DATAHUB_PORT"] = 8080 + if env.is_service_enabled_and_valid(settings.SERVICE_CODE_SERVER): + for name, value in cls.get_datacoves_versions(env).items(): + data[f"{env_prefix}{name}"] = value + secrets = make.hashed_secret( + name=f"{user.slug}-user-secrets", + data=cls._copy_user_secrets_for_local_airflow(data=data, prefix=env_prefix), + labels=cls._get_labels_adapter(), + ) + return secrets + + @classmethod + def _copy_user_secrets_for_local_airflow(cls, prefix: str, data: dict = {}) -> dict: + """We duplicate the variables so that they are available in my local airflow""" + local_airflow_env = data.copy() + for key, value in local_airflow_env.items(): + if key.startswith(prefix): + new_key = key.replace(prefix, "") + data[new_key] = value + return data + + @classmethod + def _gen_user_ssl_keys(cls, env: Environment, user: User): + code_server_ssl_keys = [] + if env.profile.mount_ssl_keys: + # Have we already prefetched user credentials? 
If so, let's + # not query it again + if cls.is_relation_cached(user, "credentials"): + # This is prefetched + user_credentials = [ + user_credential + for user_credential in user.credentials.all() + if user_credential.environment_id == env.id + and user_credential.ssl_key is not None + and user_credential.validated_at is not None + ] + + else: + user_credentials = ( + user.credentials.select_related("connection_template") + .filter( + environment=env, + ssl_key__isnull=False, + validated_at__isnull=False, + ) + .order_by("id") + ) + + for user_cred in user_credentials: + targets = [] + for usage in user_cred.used_on: + service, target = usage.split(".") + if service == settings.SERVICE_CODE_SERVER: + targets.append(target) + if targets: + key = user_cred.ssl_key + code_server_ssl_keys.append( + { + "targets": targets, + "connection": user_cred.slug, + "key_type": key.key_type, + "private": key.private, + "public": key.public, + } + ) + return code_server_ssl_keys + + @classmethod + def _gen_user_ssh_keys(cls, env: Environment, user: User): + ssh_keys = [] + if env.profile.mount_ssh_keys: + if cls.is_relation_cached(user, "repositories"): + tested_repos = [ + repo + for repo in user.repositories.all() + if repo.repository_id == env.project.repository_id + and repo.validated_at is not None + ] + else: + tested_repos = user.get_repositories_tested(env.project.repository) + + for user_repo in tested_repos: + key = user_repo.ssh_key + ssh_keys.append( + { + "repo": user_repo.repository.git_url, + "key_type": key.key_type, + "private": key.private, + "public": key.public, + } + ) + return ssh_keys + + @classmethod + def _gen_user_profile_files(cls, env: Environment, user: User): # noqa: C901 + """Generate user profile files from templates. + + The linter complains that this is too complex, but the complexity + is around dealing with caching and it isn't easy or even smart to + refactor it into separarate functions. + """ + + files = [] + + template_filter = ( + Q(template__context_type=Template.CONTEXT_TYPE_USER_CREDENTIALS) + | Q(template__context_type=Template.CONTEXT_TYPE_NONE) + | Q(template__context_type=Template.CONTEXT_TYPE_USER) + | Q(template__context_type=Template.CONTEXT_TYPE_ENVIRONMENT) + ) + + # Template filter as a set as well for python-based filtering. + template_set = set( + [ + Template.CONTEXT_TYPE_USER_CREDENTIALS, + Template.CONTEXT_TYPE_NONE, + Template.CONTEXT_TYPE_USER, + Template.CONTEXT_TYPE_ENVIRONMENT, + ] + ) + + # Do we already have the profile data loaded? If so, let's filter + # in python. + if cls.is_relation_cached(env, "profile") and cls.is_relation_cached( + env.profile, "files" + ): + profile_files = [ + f + for f in env.profile.files.all() + if f.template.context_type in template_set + ] + else: + profile_files = env.profile.files.select_related("template").filter( + template_filter + ) + + if env.profile.files_from: + # By now, env.profile is definitely loaded. And this will + # eagerly load files_from if it isn't yet cached, so we + # just care if files_from.files has been loaded. 
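+ # (is_relation_cached is assumed to inspect Django's relation caches,
+ # roughly obj._state.fields_cache for select_related objects and
+ # obj._prefetched_objects_cache for prefetch_related querysets, so this
+ # branch stays query-free only when files_from.files was prefetched by
+ # the caller.)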
+ if cls.is_relation_cached(env.profile.files_from, "files"): + # Make a set of already loaded slugs + existing_mount_paths = {f.mount_path for f in profile_files} + + files_from_files = [ + f + for f in env.profile.files_from.files.all() + if f.template.context_type in template_set + and f.mount_path not in existing_mount_paths + ] + + profile_files += files_from_files + + else: + profile_files = ( + env.profile.files_from.files.select_related("template") + .filter(template_filter) + .exclude(mount_path__in=[file.mount_path for file in profile_files]) + .union(profile_files) + ) + + # profile_files will be either a list or a QuerySet. Let's handle + # sorting. + if isinstance(profile_files, list): + profile_files.sort(key=lambda x: x.slug) + else: + profile_files = profile_files.order_by("slug") + + # Only user_credentials type of contexts are supported right now + for file in profile_files: + try: + context = {} + if file.template.context_type == Template.CONTEXT_TYPE_USER_CREDENTIALS: + context = build_user_credentials_context( + user, env, list(user.credentials.filter(environment=env)) + ) + elif file.template.context_type == Template.CONTEXT_TYPE_ENVIRONMENT: + context = build_environment_context(env) + elif file.template.context_type == Template.CONTEXT_TYPE_USER: + context = build_user_context(user) + generated = file.template.render(context) + except jinja2.exceptions.TemplateError as e: + # Don't make the whole env sync fail because of a template rendering error. + # We record the error in the file itself, to be found when debugging. + generated = f"# Error generating file, rendering template: {e}\n" + if generated.strip(): + if not file.execute: + generated = file.template.embedded_comment + generated + files.append( + { + "slug": file.slug, + "content": generated, + "mount_path": file.mount_path, + "override": file.override_existent, + "execute": file.execute, + "permissions": file.permissions, + } + ) + + return files diff --git a/src/core/api/app/clusters/adapters/datahub.py b/src/core/api/app/clusters/adapters/datahub.py new file mode 100644 index 00000000..c9b316d9 --- /dev/null +++ b/src/core/api/app/clusters/adapters/datahub.py @@ -0,0 +1,423 @@ +from clusters.models import Cluster +from clusters.tasks import setup_datahub_groups +from django.conf import settings +from projects.models import Environment + +from lib.dicts import deep_merge +from lib.kubernetes import make + +from ..external_resources.postgres import create_database +from . 
import EnvironmentAdapter + +GROUPS_CREATION_DELAY_MINUTES = 10 + + +class DataHubAdapter(EnvironmentAdapter): + service_name = settings.SERVICE_DATAHUB + deployment_name = ( + "{env_slug}-datahub-datahub-frontend,{env_slug}-datahub-datahub-gms" + ) + subdomain = "datahub-{env_slug}" + chart_versions = ["0.4.16"] + + @classmethod + def _gen_db_credentials_secret(cls, password=None): + return make.hashed_secret( + name="datahub-db", + data={ + "postgres-password": password, + }, + ) + + @classmethod + def _gen_oidc_secret(cls, env: Environment): + oauth = env.datahub_config["oauth"] + return make.hashed_secret( + name="datahub-oidc", + data={ + "client_id": oauth["idp_client_id"], + "client_secret": oauth["idp_client_secret"], + }, + labels=cls._get_labels_adapter(), + ) + + @classmethod + def get_oidc_groups(cls, env: Environment, user): + permissions = user.service_resource_permissions(cls.service_name, env=env) + groups = ["Reader"] + if "*|write" in permissions or "admin|write" in permissions: + groups = ["Admin"] + elif "data|write" in permissions: + groups = ["Editor"] + return groups + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + resources = [] + db_secret_name = None + # when there is an external database available + if env.datahub_config.get("db", {}).get("host"): + db_secret = cls._gen_db_credentials_secret( + password=env.datahub_config["db"]["password"] + ) + db_secret_name = db_secret["metadata"]["name"] + resources.append(db_secret) + + oidc_secret = cls._gen_oidc_secret(env) + resources.append(oidc_secret) + values = cls._gen_datahub_values( + env, db_secret_name, oidc_secret["metadata"]["name"] + ) + + resources.append( + make.hashed_json_config_map( + name="datahub-values", + data={"values.yaml": values}, + labels=cls._get_labels_adapter(), + ) + ) + + return resources + + @classmethod + def sync_external_resources(cls, env: Environment): + cls._sync_external_dbs(env) + + @classmethod + def _sync_external_dbs(cls, env: Environment): + if not env.datahub_config["db"].get("external", False): + return + + if not env.cluster.has_dynamic_db_provisioning(): + return + + already_configured = ( + cls._external_db_config_unmet_preconditions(env.datahub_config) == [] + ) + if already_configured: + return + + db_data = create_database(env, "dh") + env.datahub_config["db"].update(db_data) + Environment.objects.filter(id=env.id).update(datahub_config=env.datahub_config) + + @classmethod + def get_cluster_default_config(cls, cluster: Cluster, source: dict = None) -> dict: + config = super().get_cluster_default_config(cluster=cluster, source=source) + config.update( + { + "db": config.get( + "db", + { + "external": False, + "backend": "postgres", + }, + ), + } + ) + + return config + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.datahub_config.copy() + if source: + config.update(source) + + oidc = config.get("oauth") + + if not oidc: + oidc = cls.get_oidc_config(env, "/callback/oidc") + # Datahub helm chart does not support HostAliases on the frontend pod, that makes + # impossible to reach the api pod from inside the frontend pod. 
+ # On datacoveslocal.com we disable SSO integration, it could be enabled if you manually edit the + # front end deployment and add the corresponding HostAliases, copy the defintion from the gms pod ;) + oidc["enabled"] = not env.cluster.is_local + + sa_user = config.get("service_account_user_id") + if not sa_user: + sa_user = cls.setup_service_account(env).id + + config.update( + { + "db": config.get( + "db", + { + "external": env.cluster.datahub_config["db"]["external"], + }, + ), + "oauth": oidc, + "airflow_ingestion_enabled": config.get( + "airflow_ingestion_enabled", True + ), + "service_account_user_id": sa_user, + } + ) + + return config + + @classmethod + def get_unmet_preconditions(cls, env: Environment): + return cls._external_db_config_unmet_preconditions(env.datahub_config) + + @classmethod + def _gen_datahub_values( + cls, env: Environment, db_secret_name: str, oidc_secret_name: str + ): + oauth = env.datahub_config["oauth"] + gms_repo, gms_tag = env.get_service_image( + "datahub", "acryldata/datahub-gms", include_registry=False + ) + frontend_repo, frontend_tag = env.get_service_image( + "datahub", "acryldata/datahub-frontend-react", include_registry=False + ) + actions_repo, actions_tag = env.get_service_image( + "datahub", "acryldata/datahub-actions", include_registry=False + ) + mae_repo, mae_tag = env.get_service_image( + "datahub", "acryldata/datahub-mae-consumer", include_registry=False + ) + mce_repo, mce_tag = env.get_service_image( + "datahub", "acryldata/datahub-mce-consumer", include_registry=False + ) + elastic_setup_repo, elastic_setup_tag = env.get_service_image( + "datahub", "acryldata/datahub-elasticsearch-setup", include_registry=False + ) + kafka_setup_repo, kafka_setup_tag = env.get_service_image( + "datahub", "acryldata/datahub-kafka-setup", include_registry=False + ) + postgres_setup_repo, postgres_setup_tag = env.get_service_image( + "datahub", "acryldata/datahub-postgres-setup", include_registry=False + ) + upgrade_repo, upgrade_tag = env.get_service_image( + "datahub", "acryldata/datahub-upgrade", include_registry=False + ) + pull_secrets = [{"name": env.docker_config_secret_name}] + + if db_secret_name: + # Using cluster provisioned db + db_data = env.datahub_config.get("db") + db_data["secret_name"] = db_secret_name + else: + # Using adapter provisioned db + db_data = env.postgresql_config.get("auth") + + db_name = env.datahub_config.get("db", {}).get("database", "datahub") + + values = { + "global": { + "imageRegistry": env.docker_registry or "docker.io", + "sql": { + "datasource": { + "host": f"{db_data['host']}:5432", + "hostForpostgresqlClient": db_data["host"], + "port": f"{db_data['port']}", + "url": f"jdbc:postgresql://{db_data['host']}:{db_data['port']}/{db_name}", + "driver": "org.postgresql.Driver", + "username": db_data["user"], + "password": { + "secretRef": db_data["secret_name"], + "secretKey": "postgres-password", + }, + "extraEnvs": [{"name": "DATAHUB_DB_NAME", "value": db_name}], + } + }, + "kafka": { + "bootstrap": {"server": f"{env.slug}-kafka:9092"}, + "zookeeper": {"server": f"{env.slug}-zookeeper:2181"}, + }, + "hostAliases": [ + { + "ip": env.cluster.internal_ip, + "hostnames": [f"api.{env.cluster.domain}"], + } + ], + }, + "datahub-gms": { + "image": {"repository": gms_repo, "tag": gms_tag}, + "imagePullSecrets": pull_secrets, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + "service": {"type": "ClusterIP"}, + }, + "datahub-frontend": { + "image": {"repository": frontend_repo, "tag": 
frontend_tag}, + "imagePullSecrets": pull_secrets, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + "service": {"type": "ClusterIP"}, + "extraEnvs": [ + { + "name": "AUTH_OIDC_CLIENT_SECRET", + "valueFrom": { + "secretKeyRef": { + "name": oidc_secret_name, + "key": "client_secret", + } + }, + }, + { + "name": "AUTH_OIDC_CLIENT_ID", + "valueFrom": { + "secretKeyRef": { + "name": oidc_secret_name, + "key": "client_id", + } + }, + }, + { + "name": "AUTH_OIDC_ENABLED", + "value": str(oauth["enabled"]).lower(), + }, + { + "name": "AUTH_OIDC_DISCOVERY_URI", + "value": oauth["idp_provider_url"] + + "/.well-known/openid-configuration/", + }, + {"name": "AUTH_OIDC_BASE_URL", "value": cls.get_public_url(env)}, + {"name": "AUTH_OIDC_PREFERRED_JWS_ALGORITHM", "value": "RS256"}, + { + "name": "AUTH_OIDC_CLIENT_AUTHENTICATION_METHOD", + "value": "client_secret_post", + }, + { + "name": "AUTH_OIDC_EXTRACT_GROUPS_ENABLED", + "value": "true", + }, + { + "name": "AUTH_OIDC_GROUPS_CLAIM", + "value": "groups", + }, + {"name": "AUTH_OIDC_SCOPE", "value": " ".join(oauth["idp_scopes"])}, + ], + }, + "acryl-datahub-actions": { + "image": {"repository": actions_repo, "tag": actions_tag}, + "imagePullSecrets": pull_secrets, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + }, + "datahub-mae-consumer": { + "image": {"repository": mae_repo, "tag": mae_tag}, + "imagePullSecrets": pull_secrets, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + }, + "datahub-mce-consumer": { + "image": {"repository": mce_repo, "tag": mce_tag}, + "imagePullSecrets": pull_secrets, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + }, + "elasticsearchSetupJob": { + "image": {"repository": elastic_setup_repo, "tag": elastic_setup_tag}, + "imagePullSecrets": pull_secrets, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + }, + "kafkaSetupJob": { + "image": {"repository": kafka_setup_repo, "tag": kafka_setup_tag}, + "imagePullSecrets": pull_secrets, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + }, + "mysqlSetupJob": {"enabled": False}, + "postgresqlSetupJob": { + "image": {"repository": postgres_setup_repo, "tag": postgres_setup_tag}, + "imagePullSecrets": pull_secrets, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + "enabled": True, + "extraEnvs": [{"name": "DATAHUB_DB_NAME", "value": db_name}], + }, + "datahubUpgrade": { + "image": {"repository": upgrade_repo, "tag": upgrade_tag}, + "imagePullSecrets": pull_secrets, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + "noCodeDataMigration": {"sqlDbType": "POSTGRES"}, + "restoreIndices": { + "extraEnvs": [{"name": "DATAHUB_DB_NAME", "value": "dh"}] + }, + }, + "datahubSystemUpdate": { + "image": {"repository": upgrade_repo, "tag": upgrade_tag}, + "imagePullSecrets": pull_secrets, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + }, + } + + values["datahub-gms"]["resources"] = { + "limits": {"memory": "2Gi"}, + "requests": {"cpu": "100m", "memory": "1Gi"}, + } + values["datahub-frontend"]["resources"] = { + "limits": {"memory": "1400Mi"}, + "requests": {"cpu": "100m", "memory": "512Mi"}, + } + values["acryl-datahub-actions"]["resources"] = { + "limits": {"memory": "1Gi"}, + "requests": {"cpu": "300m", "memory": 
"512Mi"}, + } + values["datahub-mae-consumer"]["resources"] = { + "limits": {"memory": "1536Mi"}, + "requests": {"cpu": "100m", "memory": "256Mi"}, + } + values["datahub-mce-consumer"]["resources"] = { + "limits": {"memory": "1536Mi"}, + "requests": {"cpu": "100m", "memory": "256Mi"}, + } + values["elasticsearchSetupJob"]["resources"] = { + "limits": {"cpu": "500m", "memory": "500Mi"}, + "requests": {"cpu": "300m", "memory": "256Mi"}, + } + values["kafkaSetupJob"]["resources"] = { + "limits": {"cpu": "500m", "memory": "1024Mi"}, + "requests": {"cpu": "300m", "memory": "768Mi"}, + } + values["postgresqlSetupJob"]["resources"] = { + "limits": {"cpu": "500m", "memory": "512Mi"}, + "requests": {"cpu": "300m", "memory": "256Mi"}, + } + values["datahubUpgrade"]["cleanupJob"] = { + "resources": { + "limits": {"cpu": "500m", "memory": "512Mi"}, + "requests": {"cpu": "300m", "memory": "256Mi"}, + } + } + values["datahubUpgrade"]["restoreIndices"] = { + "resources": { + "limits": {"cpu": "500m", "memory": "512Mi"}, + "requests": {"cpu": "300m", "memory": "256Mi"}, + } + } + values["datahubSystemUpdate"]["resources"] = { + "limits": {"cpu": "500m", "memory": "512Mi"}, + "requests": {"cpu": "300m", "memory": "256Mi"}, + } + + return deep_merge( + env.datahub_config.get("override_values", {}), + deep_merge(env.cluster.datahub_config.get("override_values", {}), values), + ) + + @classmethod + def get_internal_service_config(cls, env: Environment, name: str) -> dict: + # Just returning a static "datahub" string to let the internal adapters know they are required + if name == "postgresql" and not env.cluster.has_dynamic_db_provisioning(): + return "datahub" + if name in ["elastic", "kafka"]: + return "datahub" + return None + + @classmethod + def on_post_enabled(cls, env: Environment) -> dict: + """ + We setup groups some minutes after datahub was enabled + """ + setup_datahub_groups.apply_async( + (env.slug,), countdown=GROUPS_CREATION_DELAY_MINUTES * 60 + ) + + return {} diff --git a/src/core/api/app/clusters/adapters/dbt_docs.py b/src/core/api/app/clusters/adapters/dbt_docs.py new file mode 100644 index 00000000..71ec3d25 --- /dev/null +++ b/src/core/api/app/clusters/adapters/dbt_docs.py @@ -0,0 +1,84 @@ +from django.conf import settings +from projects.models import Environment + +from lib.kubernetes import make + +from . import EnvironmentAdapter + + +class DbtDocsAdapter(EnvironmentAdapter): + service_name = settings.SERVICE_DBT_DOCS + deployment_name = "dbt-docs" + subdomain = "dbt-docs-{env_slug}" + + DBT_DOCS_GIT_SYNC_SECRET_NAME = "dbt-docs-git-sync-secrets" + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + dbt_docs_env = cls._gen_dbt_docs_git_sync_secrets(env) + dbt_docs_env_secret = make.hashed_secret( + name=cls.DBT_DOCS_GIT_SYNC_SECRET_NAME, + data=dbt_docs_env, + labels=cls._get_labels_adapter(), + ) + return [dbt_docs_env_secret] + + @classmethod + def get_unmet_preconditions(cls, env: Environment): + return cls._git_clone_unmet_precondition(env) + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.dbt_docs_config.copy() + + if source: + config.update(source) + + # set defaults if needed for Azure Devops, which doesn't use the + # secret. 
+ base_defaults = { + "git_branch": config.get("git_branch", "dbt-docs"), + } + + if env.project.clone_strategy.startswith("azure"): + base_defaults[ + "askpass_url" + ] = "http://core-api-svc.core/api/v1/gitcallback/" + str(env.project.uid) + + config.update(base_defaults) + + return config + + @classmethod + def _gen_dbt_docs_git_sync_secrets(cls, env: Environment): + if env.project.clone_strategy == env.project.HTTP_CLONE_STRATEGY: + _, image_tag = env.release.get_image( + repo="registry.k8s.io/git-sync/git-sync" + ) + # git-sync v3 + key_name_git_sync_username = "GIT_SYNC_USERNAME" + key_name_git_sync_password = "GIT_SYNC_PASSWORD" + if image_tag.startswith("v4"): + # git-sync v4 + key_name_git_sync_username = "GITSYNC_USERNAME" + key_name_git_sync_password = "GITSYNC_PASSWORD" + + creds = env.project.deploy_credentials + return { + key_name_git_sync_username: creds["git_username"], + key_name_git_sync_password: creds["git_password"], + } + + elif env.project.clone_strategy == env.project.SSH_CLONE_STRATEGY: + return { + "gitSshKey": env.project.deploy_key.private, + } + + else: + return {} + + @classmethod + def get_writable_config(cls, env: Environment) -> dict: + return { + "git_branch": env.dbt_docs_config.get("git_branch"), + } diff --git a/src/core/api/app/clusters/adapters/elastic.py b/src/core/api/app/clusters/adapters/elastic.py new file mode 100644 index 00000000..e7b61a06 --- /dev/null +++ b/src/core/api/app/clusters/adapters/elastic.py @@ -0,0 +1,90 @@ +from django.conf import settings +from projects.models import Environment + +from lib.dicts import deep_merge +from lib.kubernetes import make + +from . import EnvironmentAdapter + + +class ElasticAdapter(EnvironmentAdapter): + service_name = settings.INTERNAL_SERVICE_ELASTIC + deployment_name = "{env_slug}-elastic" + default_resources = { + "requests": {"cpu": "500m", "memory": "1Gi"}, + "limits": {"cpu": "1", "memory": "2Gi"}, + } + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + resources = [] + + if extra_config: + values = cls._gen_values(env, extra_config) + + values_config_map = make.hashed_json_config_map( + name="elastic-values", + data={"values.yaml": values}, + labels=cls._get_labels_adapter(), + ) + resources.append(values_config_map) + + return resources + + @classmethod + def enable_service(cls, env: Environment, extra_config: list = None): + enable = True if extra_config else False + + # To avoid a mandatory update query this otherwise causes. 
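+ # (extra_config is expected to be the list of values each enabled service
+ # returns from get_internal_service_config; DataHub, for instance, returns
+ # "datahub" for elastic, so a non-empty list means at least one service
+ # still needs this backing store. Comparing the stored flag first keeps the
+ # common "nothing changed" sync path free of UPDATE queries.)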
+ if env.internal_services[cls.service_name].get("enabled") != enable: + env.internal_services[cls.service_name]["enabled"] = enable + + Environment.objects.filter(id=env.id).update( + internal_services=env.internal_services, + ) + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.elastic_config.copy() + if env and env.type: + high_availability = env.type == env.TYPE_PROD + else: + high_availability = False + if source: + config.update(source) + + config.update( + { + "resources": config.get("resources", cls.default_resources), + "replicas": 3 if high_availability else 1, + } + ) + + return config + + @classmethod + def _gen_values(cls, env: Environment, extra_config: list): + config = env.elastic_config + + image, tag = env.get_service_image( + "elastic", "docker.elastic.co/elasticsearch/elasticsearch" + ) + + values = { + "image": image, + "imageTag": tag, + "nodeSelector": cls.VOLUMED_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + "imagePullSecrets": [{"name": env.docker_config_secret_name}], + "esJavaOpts": "-Xmx512m -Xms512m", + "replicas": config["replicas"], + "minimumMasterNodes": 1, + } + if config["replicas"] == 1: + values["antiAffinity"] = "soft" + values["clusterHealthCheckParams"] = "wait_for_status=yellow&timeout=1s" + + if env.cluster.defines_resource_requests: + values["resources"] = config["resources"] + + return deep_merge(config.get("override_values", {}), values) diff --git a/src/core/api/app/clusters/adapters/grafana.py b/src/core/api/app/clusters/adapters/grafana.py new file mode 100644 index 00000000..fd13f775 --- /dev/null +++ b/src/core/api/app/clusters/adapters/grafana.py @@ -0,0 +1,25 @@ +import logging +from copy import deepcopy + +from django.conf import settings +from projects.models import Environment + +from . import EnvironmentAdapter + +logger = logging.getLogger(__name__) + + +class GrafanaAdapter(EnvironmentAdapter): + service_name = settings.INTERNAL_SERVICE_GRAFANA + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = deepcopy(env.grafana_config) + return config + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + """ + Generate k8s and Grafana resources + """ + return [] diff --git a/src/core/api/app/clusters/adapters/kafka.py b/src/core/api/app/clusters/adapters/kafka.py new file mode 100644 index 00000000..0928df2a --- /dev/null +++ b/src/core/api/app/clusters/adapters/kafka.py @@ -0,0 +1,112 @@ +from django.conf import settings +from projects.models import Environment + +from lib.dicts import deep_merge +from lib.kubernetes import make + +from . import EnvironmentAdapter + + +class KafkaAdapter(EnvironmentAdapter): + service_name = settings.INTERNAL_SERVICE_KAFKA + deployment_name = "{env_slug}-kafka" + default_resources = { + "requests": {"cpu": "250m", "memory": "300mi"}, + "limits": {"cpu": "500m", "memory": "1Gi"}, + } + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + resources = [] + + if extra_config: + values = cls._gen_values(env, extra_config) + + values_config_map = make.hashed_json_config_map( + name="kafka-values", + data={"values.yaml": values}, + labels=cls._get_labels_adapter(), + ) + resources.append(values_config_map) + + return resources + + @classmethod + def enable_service(cls, env: Environment, extra_config: list = None): + enable = True if extra_config else False + + # To avoid a mandatory update query this otherwise causes. 
+ if env.internal_services[cls.service_name].get("enabled") != enable: + env.internal_services[cls.service_name]["enabled"] = enable + + Environment.objects.filter(id=env.id).update( + internal_services=env.internal_services, + ) + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.kafka_config.copy() + if source: + config.update(source) + + config.update( + { + "resources": config.get("resources", cls.default_resources), + } + ) + + return config + + @classmethod + def _gen_values(cls, env: Environment, extra_config: list): + config = env.kafka_config + + kafka_image, kafka_tag = env.get_service_image( + "kafka", "bitnami/kafka", include_registry=False + ) + + zookeeper_image, zookeeper_tag = env.get_service_image( + "kafka", "bitnami/zookeeper", include_registry=False + ) + + values = { + "image": { + "registry": env.docker_registry or "docker.io", + "repository": kafka_image, + "tag": kafka_tag, + "pullSecrets": [env.docker_config_secret_name], + }, + "nodeSelector": cls.VOLUMED_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + "listeners": { + "client": {"protocol": "PLAINTEXT"}, + "interbroker": {"protocol": "PLAINTEXT"}, + }, + "controller": {"replicaCount": 0}, + "broker": { + "replicaCount": 1, + "minId": 0, + "extraConfig": "\n".join( + [ + "message.max.bytes=5242880", + "default.replication.factor=1", + "offsets.topic.replication.factor=1", + "transaction.state.log.replication.factor=1", + ] + ), + }, + "kraft": {"enabled": False}, + "zookeeper": { + "enabled": True, + "image": { + "registry": env.docker_registry or "docker.io", + "repository": zookeeper_image, + "tag": zookeeper_tag, + "pullSecrets": [env.docker_config_secret_name], + }, + }, + } + if env.cluster.defines_resource_requests: + values["resources"] = config["resources"] + + return deep_merge(config.get("override_values", {}), values) diff --git a/src/core/api/app/clusters/adapters/minio.py b/src/core/api/app/clusters/adapters/minio.py new file mode 100644 index 00000000..a6d49e5c --- /dev/null +++ b/src/core/api/app/clusters/adapters/minio.py @@ -0,0 +1,95 @@ +import secrets + +from django.conf import settings +from projects.models import Environment + +from lib.dicts import deep_merge +from lib.kubernetes import make + +from . import EnvironmentAdapter + + +class MinioAdapter(EnvironmentAdapter): + service_name = settings.INTERNAL_SERVICE_MINIO + deployment_name = "{env_slug}-minio" + + MINIO_VALUES_CONFIG_MAP_NAME = "minio-values" + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + buckets = set().union(*[x.get("buckets", False) for x in extra_config]) + resources = [] + + if buckets: + values = cls._gen_values(env, buckets) + + values_config_map = make.hashed_json_config_map( + name=cls.MINIO_VALUES_CONFIG_MAP_NAME, + data={"values.yaml": values}, + labels=cls._get_labels_adapter(), + ) + resources.append(values_config_map) + + return resources + + @classmethod + def enable_service(cls, env: Environment, extra_config: list = None): + buckets = set().union(*[x.get("buckets", False) for x in extra_config]) + enable = True if buckets else False + + # To avoid a mandatory update query this otherwise causes. 
+ if env.internal_services[cls.service_name].get("enabled") != enable: + env.internal_services[cls.service_name]["enabled"] = enable + + Environment.objects.filter(id=env.id).update( + internal_services=env.internal_services, + ) + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.minio_config.copy() + if source: + config.update(source) + + config.update( + { + "auth": config.get( + "auth", + { + "root_user": "admin", + "root_password": secrets.token_urlsafe(16) + .replace("-", "") + .replace("_", ""), + }, + ), + } + ) + + return config + + @classmethod + def _gen_values(cls, env: Environment, buckets: list): + config = env.minio_config + + image, tag = env.get_service_image( + "core", "bitnami/minio", include_registry=False + ) + + values = { + "auth": { + "rootUser": config["auth"]["root_user"], + "rootPassword": config["auth"]["root_password"], + }, + "defaultBuckets": ",".join(buckets), + "nodeSelector": cls.VOLUMED_NODE_SELECTOR, + "image": { + "repository": image, + "tag": tag, + "pullSecrets": [env.docker_config_secret_name], + }, + "commonLabels": cls._get_labels_adapter(), + } + if env.docker_registry: + values["image"]["registry"] = env.docker_registry + + return deep_merge(config.get("override_values", {}), values) diff --git a/src/core/api/app/clusters/adapters/mixins/__init__.py b/src/core/api/app/clusters/adapters/mixins/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/clusters/adapters/mixins/airflow_config.py b/src/core/api/app/clusters/adapters/mixins/airflow_config.py new file mode 100644 index 00000000..41c5a696 --- /dev/null +++ b/src/core/api/app/clusters/adapters/mixins/airflow_config.py @@ -0,0 +1,325 @@ +"""This file is a "mixin" style base class which provides methods needed +by both CodeServerAdapter for its local airflow and AirflowAdapter.""" + +from pathlib import Path + +from clusters.models import Cluster +from django.conf import settings +from projects.models import Environment + +REPO_PATH = "/opt/airflow/dags/repo" +REPO_PATH_WRITABLE = "/tmp/airflow_repo" + + +class AirflowConfigMixin: + @classmethod + def _get_log_backed(cls, env: Environment) -> str: + return env.airflow_config["logs"]["backend"] + + @classmethod + def _setup_second_log_handler(cls, env: Environment) -> bool: + return env.airflow_config.get("logs", {}).get("loki_enabled", False) + + @classmethod + def _gen_airflow_webserver_config( + cls, env: Environment, oauth: dict = None, default_role: str = "Viewer" + ): + """This generates a webserver_config.py for airflow in order to let + our authorization system work. If oauth is not provided, we will + get it from env.airflow_config. It should be the return value of + get_oidc_config + + default_role is the role which will be given as a base level + permission. For shared airflow, this should be Viewer (default) + but for local airflow it should be Admin. 
+ """ + + if oauth is None: + oauth = env.airflow_config.get("oauth") + + OAUTH_PROVIDERS = [ + { + "name": "datacoves", + "token_key": "access_token", + "remote_app": { + "client_id": oauth["idp_client_id"], + "client_secret": oauth["idp_client_secret"], + "api_base_url": oauth["idp_provider_url"], + "server_metadata_url": f"{oauth['idp_provider_url']}/.well-known/openid-configuration", + "client_kwargs": {"scope": " ".join(oauth["idp_scopes"])}, + }, + } + ] + + # Code server always uses security_manager v2 + if cls.service_name == "code-server" or cls._is_feature_enabled( + "security_manager_v2", env + ): + with open("clusters/adapters/airflow/security_manager.py", "r") as file: + security_manager = file.read() + + security_manager_conf = ( + "\n" + + "FORCE_ADMIN_ROLE=" + + ("True" if default_role == "Admin" else "False") + + "\n\n" + + security_manager + + "\n\nSECURITY_MANAGER_CLASS = CustomSecurityManager" + ) + else: + security_manager_conf = "\nFAB_SECURITY_MANAGER_CLASS = 'security_manager.CustomSecurityManager'" + + role_mappings = { + role: [role] for role in ["User", "Admin", "Op", "Viewer", "SysAdmin"] + } + return ( + "\n".join( + [ + "from flask_appbuilder.security.manager import AUTH_OAUTH", + "WTF_CSRF_ENABLED = True", + "AUTH_USER_REGISTRATION = True", + f"AUTH_USER_REGISTRATION_ROLE = '{default_role}'", + f"AUTH_ROLES_MAPPING = {role_mappings}", + "AUTH_TYPE = AUTH_OAUTH", + "AUTH_ROLES_SYNC_AT_LOGIN = True", + f"OAUTH_PROVIDERS = {OAUTH_PROVIDERS}", + ] + ) + + security_manager_conf + ) + + @classmethod + def _get_default_env_vars(cls, env: Environment) -> dict: + # Determine if the environment is high availability (production) + high_availability = env.type == env.TYPE_PROD + + # Default file process timeout + file_process_timeout = 60 + + # Increase timeout for local airflow + if env.cluster.provider == Cluster.KIND_PROVIDER: + file_process_timeout = 180 + + # Default environment variables for Airflow + default_envs = { + "AIRFLOW__CORE__DEFAULT_TASK_RETRIES": 2, + "AIRFLOW__CORE__PARALLELISM": 64 if high_availability else 32, + "AIRFLOW__SCHEDULER__MAX_DAGRUNS_TO_CREATE_PER_LOOP": 20 + if high_availability + else 10, + "AIRFLOW__SCHEDULER__MAX_DAGRUNS_PER_LOOP_TO_SCHEDULE": 40 + if high_availability + else 20, + "AIRFLOW__CORE__MAX_ACTIVE_RUNS_PER_DAG": 32 if high_availability else 16, + "AIRFLOW__KUBERNETES_EXECUTOR__WORKER_PODS_CREATION_BATCH_SIZE": 5, + "AIRFLOW__WEBSERVER__WEB_SERVER_MASTER_TIMEOUT": 120, + "AIRFLOW__WEBSERVER__WEB_SERVER_WORKER_TIMEOUT": 120, + "AIRFLOW__CORE__DAG_FILE_PROCESSOR_TIMEOUT": file_process_timeout, + "AIRFLOW__SCHEDULER__MIN_FILE_PROCESS_INTERVAL": file_process_timeout, + "AIRFLOW__SCHEDULER__TASK_QUEUED_TIMEOUT": 600, + "AIRFLOW__CORE__DAGBAG_IMPORT_TIMEOUT": 300, + "AIRFLOW__SCHEDULER__PARSING_PROCESSES": 1, + "AIRFLOW__SCHEDULER__SCHEDULER_ZOMBIE_TASK_THRESHOLD": 600, + "DATACOVES__DBT_API_URL": settings.DBT_API_URL, + "UV_CACHE_DIR": "/tmp/uv", + } + + # Update default environment variables with any custom settings from the environment configuration + custom_envs = env.airflow_config.get("custom_envs", {}) + default_envs.update(custom_envs) + return default_envs + + @classmethod + def _abs_repo_path(cls, env: Environment) -> Path: + return Path(REPO_PATH) / env.airflow_config.get("dags_folder", "") + + @classmethod + def get_env_vars(cls, env: Environment, user=None): + """Pass 'user' if we are doing this as a per-user 'local' airflow. 
+ We will weed out some of the configuration variables that are not + relevant to local airflow.""" + + readonly_repo_path = Path(REPO_PATH) + writable_repo_path = Path(REPO_PATH_WRITABLE) + abs_dags_folder = cls._abs_repo_path(env) + yaml_dags_folder = readonly_repo_path / env.airflow_config.get( + "yaml_dags_folder", "" + ) + dbt_profiles_dir = readonly_repo_path / env.dbt_profiles_dir + dbt_home = readonly_repo_path / env.dbt_home_path + + # For the User airflow, we want just a basic set of environment + # variables and we'll use defaults for standalone. + # + # The following variables apply to both user and shared airflow. + + env_vars = [ + { + "name": "DATACOVES__DAGS_FOLDER", + "value": str(abs_dags_folder), + }, + { + "name": "DATACOVES__YAML_DAGS_FOLDER", + "value": str(yaml_dags_folder), + }, + { + "name": "DBT_PROFILES_DIR", + "value": str(dbt_profiles_dir), + }, + { + "name": "LOG_LEVEL", + "value": env.airflow_config.get("dbt_log_level", "warn"), + }, + { + "name": "AIRFLOW__WEBSERVER__BASE_URL", + "value": ( + cls.get_public_url(env, user) if user else cls.get_public_url(env) + ), + }, + {"name": "DATACOVES__ACCOUNT_SLUG", "value": str(env.account.slug)}, + {"name": "DATACOVES__PROJECT_SLUG", "value": str(env.project.slug)}, + {"name": "DATACOVES__ENVIRONMENT_SLUG", "value": str(env.slug)}, + {"name": "DATACOVES__REPO_PATH_RO", "value": str(readonly_repo_path)}, + {"name": "DATACOVES__REPO_PATH", "value": str(writable_repo_path)}, + {"name": "DATACOVES__DBT_HOME", "value": str(dbt_home)}, + {"name": "AIRFLOW__WEBSERVER__DAG_DEFAULT_VIEW", "value": "graph"}, + { + "name": "AIRFLOW__CORE__DAGS_FOLDER", + "value": str(abs_dags_folder), + }, + {"name": "PYTHONPATH", "value": str(readonly_repo_path)}, + { + "name": "DATACOVES__SECRETS_API_ENDPOINT", + "value": f"https://api.{env.cluster.domain}/api/v1/secrets/{env.slug}", + }, + { + "name": "DATACOVES__SECRETS_PUSH_ENDPOINT", + "value": f"https://api.{env.cluster.domain}/api/v1/secret-push/{env.slug}", + }, + { + "name": "DATACOVES__DBT_PROFILE", + "value": env.dbt_profile, + }, + { + "name": "AIRFLOW__CORE__TEST_CONNECTION", + "value": "Enabled", + }, + ] + env_vars.extend( + [ + {"name": name, "value": value} + for name, value in cls.get_datacoves_versions(env).items() + ] + ) + + if cls._setup_second_log_handler(env=env): + env_vars.extend( + [ + { + "name": "DATACOVES__SECONDARY_LOG_TASK_HANDLER", + "value": "loki", + }, + { + "name": "DATACOVES__LOKI_TLS_VERIFY", + "value": str( + env.airflow_config["logs"].get("loki_tls_verify", True) + ), + }, + ] + ) + + env_vars.extend( + [ + { + "name": "DATACOVES__BASE_URL_CORE_API", + "value": "http://core-api-svc.core.svc", + }, + { + "name": "DATACOVES__SECONDARY_SECRET_MANAGER", + "value": ( + env.project.secrets_secondary_backend + if env.project.secrets_secondary_backend + else "none" + ), + }, + { + "name": "DATACOVES__AIRFLOW_TYPE", + "value": "my_airflow" if user else "team_airflow", + }, + ] + ) + + env_vars.extend( + [ + {"name": name, "value": str(value)} + for name, value in cls._get_default_env_vars(env).items() + ] + ) + + upload_manifest = env.airflow_config.get("upload_manifest", "") + if upload_manifest: + env_vars.extend( + [ + { + "name": "DATACOVES__UPLOAD_MANIFEST", + "value": str(upload_manifest), + }, + { + "name": "DATACOVES__UPLOAD_MANIFEST_URL", + "value": str(env.airflow_config.get("upload_manifest_url")), + }, + { + "name": "DATACOVES__UPLOAD_MANIFEST_TOKEN", + "value": str(env.airflow_config.get("service_account_token")), + }, + ] + ) + + if user: + # These 
apply just to user / local airflow aka My Airflow + env_vars.extend( + [ + { + "name": "AIRFLOW__SCHEDULER__DAG_DIR_LIST_INTERVAL", + "value": "5", + }, + # Force datahub to off for local airflow + { + "name": "AIRFLOW__DATAHUB__ENABLED", + "value": "False", + }, + { + "name": "AIRFLOW__DATABASE__LOAD_DEFAULT_CONNECTIONS", + "value": "False", + }, + { + "name": "AIRFLOW__WEBSERVER__NAVBAR_COLOR", + "value": "#3496E0", + }, + { + "name": "AIRFLOW__WEBSERVER__NAVBAR_TEXT_COLOR", + "value": "#fff", + }, + { + "name": "AIRFLOW__WEBSERVER__NAVBAR_LOGO_TEXT_COLOR", + "value": "#fff", + }, + { + "name": "DATACOVES__TEAM_AIRFLOW_HOST_NAME", + "value": f"http://{env.slug}-airflow-webserver:8080", + }, + { + "name": "DATACOVES__MY_AIRFLOW_HOST_NAME", + "value": f"http://airflow-{user.slug}:8080", + }, + # NOTE: The Airflow equivalent is set in the Airflow + # adapter as it is dynamic. For My Airflow, this is + # static at the moment. + { + "name": "DATACOVES__AIRFLOW_NOTIFICATION_INTEGRATION", + "value": "", + }, + ] + ) + + return env_vars diff --git a/src/core/api/app/clusters/adapters/neo4j.py b/src/core/api/app/clusters/adapters/neo4j.py new file mode 100644 index 00000000..652b67f6 --- /dev/null +++ b/src/core/api/app/clusters/adapters/neo4j.py @@ -0,0 +1,124 @@ +import secrets + +from django.conf import settings +from projects.models import Environment + +from lib.dicts import deep_merge +from lib.kubernetes import make + +from . import EnvironmentAdapter + + +class Neo4jAdapter(EnvironmentAdapter): + service_name = settings.INTERNAL_SERVICE_NEO4J + deployment_name = "{env_slug}-neo4j" + default_resources = { + "requests": {"cpu": "1", "memory": "2Gi"}, + "limits": {"cpu": "1", "memory": "2Gi"}, + } + + @classmethod + def _gen_credentials_secret(cls, env: Environment, password=None): + password = password or env.neo4j_config["auth"]["password"] + return make.hashed_secret( + name="neo4j-secret", + data={ + "NEO4J_AUTH": f"neo4j/{password}", + }, + labels=cls._get_labels_adapter(), + ) + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + resources = [] + + if extra_config: + db_secret = cls._gen_credentials_secret(env) + values = cls._gen_values(env, db_secret["metadata"]["name"]) + + values_config_map = make.hashed_json_config_map( + name="neo4j-values", + data={"values.yaml": values}, + ) + resources += [values_config_map, db_secret] + + return resources + + @classmethod + def enable_service(cls, env: Environment, extra_config: list = None): + enable = True if extra_config else False + + # To avoid a mandatory update query this otherwise causes. 
+ if env.internal_services[cls.service_name].get("enabled") != enable: + env.internal_services[cls.service_name]["enabled"] = enable + + Environment.objects.filter(id=env.id).update( + internal_services=env.internal_services, + ) + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.neo4j_config.copy() + if source: + config.update(source) + + password = secrets.token_urlsafe(32) + secret = cls._gen_credentials_secret(env, password) + + config.update( + { + "resources": config.get("resources", cls.default_resources), + "auth": config.get( + "auth", + {"password": password, "secret_name": secret["metadata"]["name"]}, + ), + } + ) + + return config + + @classmethod + def _gen_values(cls, env: Environment, credentials_secret: list): + config = env.neo4j_config + + image, tag = env.get_service_image("neo4j", "neo4j") + + cleanup_image, cleanup_tag = env.get_service_image( + "neo4j", "bitnami/kubectl", include_registry=False + ) + + values = { + "image": { + "customImage": f"{image}:{tag}", + "imagePullSecrets": [env.docker_config_secret_name], + }, + "nodeSelector": cls.VOLUMED_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + "services": { + "neo4j": { + "cleanup": { + "image": { + "registry": env.docker_registry or "docker.io", + "repository": cleanup_image, + "tag": cleanup_tag, + } + }, + "spec": {"type": "ClusterIP"}, + } + }, + "containerSecurityContext": {"allowPrivilegeEscalation": False}, + "volumes": {"data": {"mode": "defaultStorageClass"}}, + "env": {"NEO4J_PLUGINS": "'[\"apoc\"]'"}, + "neo4j": { + "name": "neo4j", + "edition": "community", + "acceptLicenseAgreement": "yes", + "defaultDatabase": "graph.db", + "passwordFromSecret": credentials_secret, + }, + } + if env.cluster.defines_resource_requests: + # The chart does not accept resources requests, just limits. + values["resources"] = config["resources"]["limits"] + + return deep_merge(config.get("override_values", {}), values) diff --git a/src/core/api/app/clusters/adapters/pomerium.py b/src/core/api/app/clusters/adapters/pomerium.py new file mode 100644 index 00000000..10a56744 --- /dev/null +++ b/src/core/api/app/clusters/adapters/pomerium.py @@ -0,0 +1,86 @@ +import base64 +import re +import secrets + +from django.conf import settings +from projects.models import Environment + +from lib.kubernetes import make + +from . 
import EnvironmentAdapter + + +class PomeriumAdapter(EnvironmentAdapter): + service_name = settings.INTERNAL_SERVICE_POMERIUM + subdomain = "authenticate-{env_slug}" + + POMERIUM_BASE_CONFIG_SECRET_NAME = "pomerium-base-config" + POMERIUM_CONFIG_SECRET_NAME = "pomerium-config" + POMERIUM_REDIS_USERS_SECRET_NAME = "pomerium-redis-users" + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + res = [] + pomerium_config_base_secret = make.hashed_yaml_file_secret( + name=cls.POMERIUM_BASE_CONFIG_SECRET_NAME, + filename="config.yaml", + data=env.pomerium_config, + labels=cls._get_labels_adapter(), + ) + res.append(pomerium_config_base_secret) + + pomerium_redis_conn = env.pomerium_config[ + "databroker_storage_connection_string" + ] + pomerium_redis_pass = re.search( + r"\w+://[^:]+:([^@]+)@", pomerium_redis_conn + ).group(1) + pomerium_redis_users_secret = make.hashed_secret( + name=cls.POMERIUM_REDIS_USERS_SECRET_NAME, + data={"users.acl": f"user default on >{pomerium_redis_pass} ~* &* +@all\n"}, + labels=cls._get_labels_adapter(), + ) + res.append(pomerium_redis_users_secret) + return res + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.pomerium_config.copy() + if source: + config.update(source) + + if ( + "idp_provider" not in config + or "databroker_storage_connection_string" not in config + ): + oidc = cls.get_oidc_config(env, "/oauth2/callback") + cookie_secret = str( + base64.standard_b64encode(secrets.token_bytes(32)), "ascii" + ) + redis_pass = secrets.token_urlsafe(16) + pomerium_redis_conn = f"redis://default:{redis_pass}@pomerium-redis:6379/" + + config.update( + { + "idp_provider": "oidc", + "idp_provider_url": oidc["idp_provider_url"], + "idp_client_id": oidc["idp_client_id"], + "idp_client_secret": oidc["idp_client_secret"], + "idp_scopes": oidc["idp_scopes"], + "cookie_secret": cookie_secret, + "shared_secret": cookie_secret, + "databroker_storage_type": "redis", + "databroker_storage_connection_string": pomerium_redis_conn, + "timeout_read": "5m", + "timeout_write": "5m", + "timeout_idle": "5m", + "cookie_domain": f".{env.cluster.domain}", + "cookie_name": f"_{env.slug}", + } + ) + + return config + + @classmethod + def is_enabled(cls, env: Environment) -> bool: + return True diff --git a/src/core/api/app/clusters/adapters/postgresql.py b/src/core/api/app/clusters/adapters/postgresql.py new file mode 100644 index 00000000..7546fe3e --- /dev/null +++ b/src/core/api/app/clusters/adapters/postgresql.py @@ -0,0 +1,111 @@ +import secrets + +from django.conf import settings +from projects.models import Environment + +from lib.dicts import deep_merge +from lib.kubernetes import make + +from . 
import EnvironmentAdapter + + +class PostgreSQLAdapter(EnvironmentAdapter): + service_name = settings.INTERNAL_SERVICE_POSTGRESQL + deployment_name = "{env_slug}-postgresql" + default_resources = { + "requests": {"cpu": "250m", "memory": "300Mi"}, + "limits": {"cpu": "500m", "memory": "1Gi"}, + } + + @classmethod + def _gen_credentials_secret(cls, env: Environment, password=None): + password = password or env.postgresql_config["auth"]["password"] + return make.hashed_secret( + name="postgresql-secret", + data={ + "postgres-password": password, + "password": password, + "replication-password": password, + }, + labels=cls._get_labels_adapter(), + ) + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + resources = [] + + if extra_config: + db_secret = cls._gen_credentials_secret(env) + values = cls._gen_values(env, db_secret["metadata"]["name"]) + + values_config_map = make.hashed_json_config_map( + name="postgresql-values", + data={"values.yaml": values}, + labels=cls._get_labels_adapter(), + ) + resources += [values_config_map, db_secret] + + return resources + + @classmethod + def enable_service(cls, env: Environment, extra_config: list = None): + enable = True if extra_config else False + + # To avoid a mandatory update query this otherwise causes. + if env.internal_services[cls.service_name].get("enabled") != enable: + env.internal_services[cls.service_name]["enabled"] = enable + + Environment.objects.filter(id=env.id).update( + internal_services=env.internal_services, + ) + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.postgresql_config.copy() + if source: + config.update(source) + + password = secrets.token_urlsafe(32) + secret = cls._gen_credentials_secret(env, password) + + config.update( + { + "resources": config.get("resources", cls.default_resources), + "auth": config.get( + "auth", + { + "host": f"{env.slug}-postgresql", + "port": 5432, + "user": "postgres", + "password": password, + "secret_name": secret["metadata"]["name"], + }, + ), + } + ) + + return config + + @classmethod + def _gen_values(cls, env: Environment, credentials_secret: list): + config = env.postgresql_config + + image, tag = env.get_service_image( + "postgresql", "bitnami/postgresql", include_registry=False + ) + + values = { + "image": { + "registry": env.docker_registry or "docker.io", + "repository": image, + "tag": tag, + "pullSecrets": [env.docker_config_secret_name], + }, + "nodeSelector": cls.VOLUMED_NODE_SELECTOR, + "commonLabels": cls._get_labels_adapter(), + "auth": {"existingSecret": credentials_secret}, + } + if env.cluster.defines_resource_requests: + values["resources"] = config["resources"] + + return deep_merge(config.get("override_values", {}), values) diff --git a/src/core/api/app/clusters/adapters/superset.py b/src/core/api/app/clusters/adapters/superset.py new file mode 100644 index 00000000..771ff2af --- /dev/null +++ b/src/core/api/app/clusters/adapters/superset.py @@ -0,0 +1,268 @@ +import base64 +import secrets + +from django.conf import settings +from projects.models import Environment + +from lib.dicts import deep_merge +from lib.kubernetes import make + +from ..external_resources.postgres import create_database +from ..models import Cluster +from . 
import EnvironmentAdapter + + +class SupersetAdapter(EnvironmentAdapter): + service_name = settings.SERVICE_SUPERSET + deployment_name = "{env_slug}-superset" + subdomain = "superset-{env_slug}" + chart_versions = ["0.10.6"] + + SUPERSET_VALUES_CONFIG_MAP_NAME = "superset-values" + + @classmethod + def gen_resources(cls, env: Environment, extra_config: list = None): + resources = [] + + values = cls._gen_superset_values(env) + + values_config_map = make.hashed_json_config_map( + name=cls.SUPERSET_VALUES_CONFIG_MAP_NAME, + data={"values.yaml": values}, + labels=cls._get_labels_adapter(), + ) + resources.append(values_config_map) + + return resources + + @classmethod + def sync_external_resources(cls, env: Environment): + cls._sync_external_dbs(env) + + @classmethod + def _sync_external_dbs(cls, env: Environment): + if not env.superset_config["db"].get("external", False): + return + + if not env.cluster.has_dynamic_db_provisioning(): + return + + already_configured = ( + cls._external_db_config_unmet_preconditions(env.superset_config) == [] + ) + if already_configured: + return + + db_data = create_database(env=env, db_name="superset") + env.superset_config["db"].update(db_data) + Environment.objects.filter(id=env.id).update( + superset_config=env.superset_config + ) + + @classmethod + def get_cluster_default_config(cls, cluster: Cluster, source: dict = None) -> dict: + config = super().get_cluster_default_config(cluster=cluster, source=source) + config.update( + { + "db": config.get( + "db", + { + "external": False, + }, + ), + } + ) + + return config + + @classmethod + def get_default_config(cls, env: Environment, source: dict = None) -> dict: + config = env.superset_config.copy() + if source: + config.update(source) + + oidc = config.get("oauth") + if not oidc: + oidc = cls.get_oidc_config(env, "/oauth-authorized/datacoves") + + config.update( + { + "db": config.get( + "db", {"external": env.cluster.superset_config["db"]["external"]} + ), + "oauth": oidc, + "load_examples": config.get("load_examples", False), + "secret_key": ( + config.get( + "secret_key", + str( + base64.standard_b64encode(secrets.token_bytes(32)), "ascii" + ), + ) + ), + } + ) + + return config + + @classmethod + def get_unmet_preconditions(cls, env: Environment): + return cls._chart_version_unmet_precondition( + env + ) + cls._external_db_config_unmet_preconditions(env.superset_config) + + @classmethod + def _gen_superset_values( + cls, + env: Environment, + ): + superset_repo, superset_tag = env.get_image( + "datacovesprivate/superset-superset" + ) + redis_repo, redis_tag = env.get_service_image( + "superset", "bitnami/redis", include_registry=False + ) + + dockerize_repo, dockerize_tag = env.get_service_image( + "superset", "apache/superset" + ) + + # Loading oauth config + oauth = env.superset_config.get("oauth") + assert oauth, "Oauth settings not found for env." 
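+        # These OAuth settings are interpolated into Superset's OAUTH_PROVIDERS entry in configOverrides below.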
+ # mapbox + mapbox = env.superset_config.get("MAPBOX_API_KEY", "") + if mapbox: + mapbox = f"MAPBOX_API_KEY = '{mapbox}'" + # previous secret key + prev_secret = env.superset_config.get("previous_secret_key", "") + if prev_secret: + prev_secret = f"PREVIOUS_SECRET_KEY = '{prev_secret}'" + # load examples + load_examples = env.superset_config.get("load_examples", False) + unsecure_dbs = "PREVENT_UNSAFE_DB_CONNECTIONS = False" if load_examples else "" + with open("clusters/adapters/superset/security_manager.py", "r") as file: + security_manager = file.read() + values = { + "image": { + "repository": superset_repo, + "tag": superset_tag, + "pullPolicy": "IfNotPresent", + }, + "imagePullSecrets": [{"name": env.docker_config_secret_name}], + "redis": { + "image": { + "repository": redis_repo, + "tag": redis_tag, + "pullPolicy": "IfNotPresent", + "pullSecrets": [env.docker_config_secret_name], + }, + }, + # initcontainers have a default timeout of 120 seconds. It might not be enough + # when superset is ran locally due to postgres helm chart image download process + "initImage": { + "repository": dockerize_repo, + "tag": dockerize_tag, + "pullPolicy": "IfNotPresent", + }, + "nodeSelector": cls.GENERAL_NODE_SELECTOR, + "supersetCeleryBeat": { + "enabled": True, + }, + "init": { + # Default 'admin' Admin gets created only if load_examples is true + "createAdmin": load_examples, + "loadExamples": load_examples, + }, + "supersetNode": {}, + "configOverrides": { + # This will make sure the redirect_uri is properly computed, even with SSL offloading + "my_override": security_manager + + "\n".join( + [ + "ENABLE_PROXY_FIX = True", + mapbox, + "FEATURE_FLAGS = {", + " 'DYNAMIC_PLUGINS': True,", + " 'ENABLE_EXPLORE_DRAG_AND_DROP': True", + "}", + "from flask_appbuilder.security.manager import AUTH_OAUTH", + "AUTH_TYPE = AUTH_OAUTH", + "OAUTH_PROVIDERS = [", + " { 'name':'" + oauth["idp_provider"] + "',", + " 'token_key': 'id_token',", + " 'remote_app': {", + " 'client_id':'" + oauth["idp_client_id"] + "',", + " 'client_secret':'" + + oauth["idp_client_secret"] + + "',", + " 'client_kwargs': {'scope': '" + + " ".join(oauth["idp_scopes"]) + + "'},", + " 'server_metadata_url': '" + + oauth["idp_provider_url"] + + "/.well-known/openid-configuration/'", + " }", + " }", + "]", + "AUTH_USER_REGISTRATION = True", + "AUTH_USER_REGISTRATION_ROLE = 'Gamma'", + "AUTH_ROLES_SYNC_AT_LOGIN = True", + "AUTH_ROLES_MAPPING = {'Admin': ['Admin'], 'Alpha': ['Alpha'], 'Gamma': ['Gamma']}", + prev_secret, + "SECRET_KEY = '" + env.superset_config["secret_key"] + "'", + unsecure_dbs, + "SQLLAB_TIMEOUT=120", + "SUPERSET_WEBSERVER_TIMEOUT = 120", + ] + ) + }, + } + if env.docker_registry: + values["redis"]["image"]["registry"] = env.docker_registry + + if env.superset_config.get("db", {}).get("external", False): + db_config = env.superset_config["db"] + values["supersetNode"] = { + "connections": { + "db_host": db_config["host"], + "db_port": str(db_config.get("port", 5432)), + "db_user": db_config["user"], + "db_pass": db_config["password"], + "db_name": db_config["database"], + } + } + values["postgresql"] = {"enabled": False} + + if env.cluster.defines_resource_requests: + values["resources"] = { + "requests": {"cpu": "100m", "memory": "250Mi"}, + "limits": {"cpu": "500m", "memory": "1Gi"}, + } + + # This is done to resolve cluster internal ip on local and private clusters + values["hostAliases"] = [ + { + "ip": env.cluster.internal_ip, + "hostnames": [f"api.{env.cluster.domain}"], + } + ] + + return deep_merge( + 
env.superset_config.get("override_values", {}), + deep_merge(env.cluster.superset_config.get("override_values", {}), values), + ) + + @classmethod + def on_post_enabled(cls, env: Environment) -> dict: + config = {} + + if ( + env.cluster.has_dynamic_db_provisioning() + and env.superset_config["db"]["external"] + ): + read_only_db_user = cls._create_read_only_db_user(env=env) + if read_only_db_user: + config["db_read_only"] = read_only_db_user + + return config diff --git a/src/core/api/app/clusters/adapters/superset/security_manager.py b/src/core/api/app/clusters/adapters/superset/security_manager.py new file mode 100644 index 00000000..03058858 --- /dev/null +++ b/src/core/api/app/clusters/adapters/superset/security_manager.py @@ -0,0 +1,50 @@ +from jose import jwt +from requests import request +from superset.security import SupersetSecurityManager + + +class CustomSecurityManager(SupersetSecurityManager): + def request(self, url, method="GET", *args, **kwargs): + kwargs.setdefault("headers", {}) + response = request(method, url, *args, **kwargs) + response.raise_for_status() + return response + + def get_jwks(self, url, *args, **kwargs): + return self.request(url, *args, **kwargs).json() + + def get_oauth_user_info(self, provider, response=None): + id_token = response["id_token"] + metadata = self.appbuilder.sm.oauth_remotes[provider].server_metadata + jwks = self.get_jwks(metadata["jwks_uri"]) + audience = self.appbuilder.sm.oauth_remotes[provider].client_id + + payload = jwt.decode( + token=id_token, + key=jwks, + algorithms=["RS256"], + audience=audience, + issuer=metadata["issuer"], + access_token=response["access_token"], + ) + + name_parts = payload["name"].split(" ", 1) + first_name = name_parts[0] + last_name = name_parts[1] if len(name_parts) > 1 else "" + permissions = payload.get("permissions", []) + if "*|write" in permissions or "security|write" in permissions: + roles = ["Admin"] + elif "data-sources|write" in permissions: + roles = ["Alpha"] + else: + roles = ["Gamma"] + return { + "email": payload["email"], + "username": payload["email"], + "first_name": first_name, + "last_name": last_name, + "role_keys": roles, + } + + +CUSTOM_SECURITY_MANAGER = CustomSecurityManager diff --git a/src/core/api/app/clusters/admin.py b/src/core/api/app/clusters/admin.py new file mode 100644 index 00000000..fa24cbba --- /dev/null +++ b/src/core/api/app/clusters/admin.py @@ -0,0 +1,98 @@ +import json + +from core.fields import EncryptedJSONField +from django.contrib import admin +from django.db import models +from django.utils.safestring import mark_safe +from django_json_widget.widgets import JSONEditorWidget +from projects.models import Release +from pygments import highlight +from pygments.formatters.html import HtmlFormatter +from pygments.lexers.data import JsonLexer + +from datacoves.admin import BaseModelAdmin + +from .models import Cluster, ClusterAlert, ClusterUpgrade + + +@admin.register(Cluster) +class ClusterAdmin(BaseModelAdmin, admin.ModelAdmin): + def formfield_for_foreignkey(self, db_field, request, **kwargs): + if db_field.name == "release": + kwargs["queryset"] = Release.objects.order_by("name") + return super().formfield_for_foreignkey(db_field, request, **kwargs) + + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + EncryptedJSONField: {"widget": JSONEditorWidget}, + } + search_fields = ("domain",) + + +@admin.register(ClusterUpgrade) +class ClusterUpgradeAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ["id", "release_name", 
"triggered_by", "status", "started_at"] + search_fields = ("release_name",) + + +class ClusterAlertInline(BaseModelAdmin, admin.TabularInline): + model = ClusterAlert + extra = 0 + can_delete = False + exclude = ["data"] + readonly_fields = ( + "name", + "namespace", + "cluster", + "environment", + "status", + "resolved", + "data_prettified", + ) + + def has_add_permission(self, request, obj): + return False + + def has_change_permission(self, request, obj=None): + return False + + def data_prettified(self, instance): + """Function to display pretty version of our data""" + response = json.dumps(instance.data, sort_keys=True, indent=4) + formatter = HtmlFormatter(style="colorful") + response = highlight(response, JsonLexer(), formatter) + style = "
" + return mark_safe(style + response) + + data_prettified.short_description = "data" + + +@admin.register(ClusterAlert) +class ClusterAlertAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = [ + "id", + "summary", + "namespace", + "created_at", + "is_system_alert", + "resolved", + ] + readonly_fields = ["data_prettified"] + exclude = ["data"] + search_fields = ("name", "data") + + def get_readonly_fields(self, request, obj=None): + excluded = ["data"] + return list(self.readonly_fields) + [ + f.name for f in self.model._meta.fields if f.name not in excluded + ] + + def data_prettified(self, instance): + """Function to display pretty version of our data""" + response = json.dumps(instance.data, sort_keys=True, indent=4) + formatter = HtmlFormatter(style="colorful") + response = highlight(response, JsonLexer(), formatter) + style = "
" + return mark_safe(style + response) + + data_prettified.short_description = "data" diff --git a/src/core/api/app/clusters/apps.py b/src/core/api/app/clusters/apps.py new file mode 100644 index 00000000..89bdbc6c --- /dev/null +++ b/src/core/api/app/clusters/apps.py @@ -0,0 +1,10 @@ +from django.apps import AppConfig + + +class ClustersConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "clusters" + + def ready(self): + from . import signals # noqa F401 + from .metrics import signals # noqa F401 diff --git a/src/core/api/app/clusters/builder.py b/src/core/api/app/clusters/builder.py new file mode 100644 index 00000000..7c4dfd23 --- /dev/null +++ b/src/core/api/app/clusters/builder.py @@ -0,0 +1,108 @@ +import logging + +from clusters import workspace +from django.core.exceptions import PermissionDenied +from django.db.models.functions import Now +from projects.models import Environment, UserEnvironment + +logger = logging.getLogger(__name__) + + +class WorkbenchBuilder: + def __init__(self, user, env_slug): + self.user = user + self.env_slug = env_slug + self.ue = None + self.environment = None + + def check_permissions(self) -> "WorkbenchBuilder": + if self.environment is None: + self.set_environment() + + project = self.user.projects.filter(environments__slug=self.env_slug).first() + if not project and not self.user.is_account_admin( + self.environment.project.account.slug + ): + raise PermissionDenied() + + return self + + def set_environment(self) -> "WorkbenchBuilder": + """Validate user access to an Environment and retrieve it""" + self.environment = Environment.objects.select_related("project__account").get( + slug=self.env_slug + ) + + return self + + def heartbeat(self) -> "WorkbenchBuilder": + self.ue = self.build() + if self.ue: + # Update heartbeat without triggering save signal. + UserEnvironment.objects.filter(id=self.ue.id).update( + heartbeat_at=Now(), + code_server_active=True, + ) + + if not self.ue.code_server_active: + # Trigger save signal, to run workspace.sync. 
+ logger.info( + "Code server is not active, triggering save signal for %s", + self.ue, + ) + UserEnvironment.objects.get(id=self.ue.id).save() + + return self + + @property + def code_server(self) -> "WorkbenchCodeServerBuilder": + return WorkbenchCodeServerBuilder(self.user, self.env_slug) + + @property + def status(self) -> "WorkbenchStatusBuilder": + return WorkbenchStatusBuilder(self.user, self.env_slug) + + def build(self) -> UserEnvironment: + return UserEnvironment.objects.filter( + user=self.user, environment__slug=self.env_slug + ).first() + + +class WorkbenchCodeServerBuilder(WorkbenchBuilder): + def __init__(self, user, env_slug): + super().__init__(user, env_slug) + ue, _ = UserEnvironment.objects.get_or_create( + environment__slug=self.env_slug, user=self.user + ) + self.ue = ue + + def restart(self) -> "WorkbenchCodeServerBuilder": + self.ue.restart_code_server() + return self + + def enable_local_airflow(self) -> "WorkbenchCodeServerBuilder": + self.ue.enabled_local_airflow() + return self + + def update_settings(self, data) -> "WorkbenchCodeServerBuilder": + self.ue.code_server_config = self.ue.code_server_config or {} + self.ue.code_server_config.update(data) + self.ue.save() + return self + + def build(self) -> UserEnvironment: + return self.ue + + +class WorkbenchStatusBuilder(WorkbenchBuilder): + def __init__(self, user, env_slug): + super().__init__(user, env_slug) + self.workloads_status = None + + def check_status(self) -> "WorkbenchStatusBuilder": + ue = super().build() + self.workloads_status = workspace.user_workloads_status(ue=ue) + return self + + def build(self) -> dict: + return self.workloads_status diff --git a/src/core/api/app/clusters/cleanup_k8s_resources.py b/src/core/api/app/clusters/cleanup_k8s_resources.py new file mode 100644 index 00000000..6c3f53b7 --- /dev/null +++ b/src/core/api/app/clusters/cleanup_k8s_resources.py @@ -0,0 +1,38 @@ +from clusters.models.cluster import Cluster +from django.conf import settings + +from lib.kubernetes.k8s_utils import ( + get_all_secrets_and_config_maps_resources_noused_by_namespace, +) + + +def cleanup_k8s_resources(namespace: str) -> list: + k8s_res_noused = get_all_secrets_and_config_maps_resources_noused_by_namespace( + namespace=namespace + ) + return k8s_res_noused + + +def cleanup_cluster_k8s_extra_resources(cluster: Cluster) -> list: + k8s_res_noused = [] + if not cluster.code_server_config["overprovisioning"]["enabled"]: + labels = f"datacoves.com/adapter={settings.SERVICE_CODE_SERVER}" + kubectl = cluster.kubectl + deployments_op = kubectl.AppsV1Api.list_namespaced_deployment( + namespace="core", + label_selector=labels, + ).items + for item in deployments_op: + item.api_version = "apps/v1" + item.kind = "Deployment" + k8s_res_noused.append(item) + + priority_classes = kubectl.SchedulingV1Api.list_priority_class( + label_selector=labels, + ).items + for item in priority_classes: + item.api_version = "scheduling.k8s.io/v1" + item.kind = "PriorityClass" + k8s_res_noused.append(item) + + return k8s_res_noused diff --git a/src/core/api/app/clusters/config_loader/__init__.py b/src/core/api/app/clusters/config_loader/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/clusters/config_loader/base.py b/src/core/api/app/clusters/config_loader/base.py new file mode 100644 index 00000000..7dd5a89f --- /dev/null +++ b/src/core/api/app/clusters/config_loader/base.py @@ -0,0 +1,155 @@ +""" +Base class for both kinds of configuration loader, to provide common +features such as 
diffing. +""" + +import difflib +import json +import logging +from enum import Enum + +from clusters.adapters import EnvironmentAdapter +from core.models import DatacovesModel + +logger = logging.getLogger(__name__) + + +class DiffEnum(Enum): + NO_CHANGES = 0 + APPLY_CHANGES = 1 + DO_NOT_APPLY_CHANGES = 2 + + +class BaseConfigLoader: + @classmethod + def _get_configuration_diff( + cls, + current_name: str, + current_value: any, + source_name: str, + source_value: any, + req_user_confirm: bool, + ) -> DiffEnum: + """Look for the differences between two sources + + Args: + current_name (str): Name of the current value + current_value (any): Current value + source (str): Name of the new value + source_value (any): New value + req_user_confirm (bool): Require user confirmation? Used by tests + + Returns: + DiffEnum: Return strategy to apply changes + """ + + if isinstance(current_value, dict): + current_value = json.dumps( + current_value, sort_keys=True, indent=4, separators=(",", ": ") + ) + source_value = json.dumps( + source_value, sort_keys=True, indent=4, separators=(",", ": ") + ) + + elif not isinstance(current_value, str): + current_value = str(current_value) + source_value = str(source_value) + + diff = difflib.unified_diff( + current_value.splitlines(), + source_value.splitlines(), + fromfile=current_name, + tofile=source_name, + lineterm="", + n=1, + ) + + diff = list(filter(lambda x: not x.startswith("@@"), diff)) + if diff: + if req_user_confirm: + from rich.console import Console + from rich.prompt import Confirm + + console = Console() + for line in diff: + if line.startswith("+"): + line = f"[green]{line}[/green]" + elif line.startswith("-"): + line = f"[red]{line}[/red]" + console.print(line) + + confirm = Confirm.ask( + f"Do you want to overwrite {current_name}?", default=False + ) + return ( + DiffEnum.APPLY_CHANGES if confirm else DiffEnum.DO_NOT_APPLY_CHANGES + ) + + return DiffEnum.APPLY_CHANGES # E.g. Integrantion test + + return DiffEnum.NO_CHANGES + + @classmethod + def _generate_model_name(cls, model: DatacovesModel) -> str: + model_name = model.__class__.__name__ + + if hasattr(model, "slug"): + model_name += f" {model.slug}" + else: + model_name += f" {model.id}" + + return model_name + + @classmethod + def _update_config( + cls, + model: DatacovesModel, + env_config: dict, + created: bool, + source: str, + req_user_confirm: bool, + ): + """Update model config.""" + model_name = cls._generate_model_name(model) + + for key, value in env_config.items(): + apply_changes = DiffEnum.APPLY_CHANGES + + if not created: + apply_changes = cls._get_configuration_diff( + current_name=model_name, + current_value=getattr(model, key), + source_name=source, + source_value=value, + req_user_confirm=req_user_confirm, + ) + + if apply_changes == DiffEnum.APPLY_CHANGES: + setattr(model, key, value) + + @classmethod + def _validate_config_diff( + cls, + model: DatacovesModel, + adapter: EnvironmentAdapter, + source_config: any, + source: str, + req_user_confirm: bool, + ) -> DiffEnum: + """This is for diffing against the default configuration based on + an adapter. I made it generic like I made everything else, but it + probably is only useful for environments. 
+ """ + + current_config = adapter.get_default_config(model) + new_config = adapter.get_default_config(model, source_config) + + model_name = f"{adapter.service_name} config " + cls._generate_model_name(model) + + return cls._get_configuration_diff( + current_name=model_name, + current_value=current_config, + source_name=source, + source_value=new_config, + req_user_confirm=req_user_confirm, + ) diff --git a/src/core/api/app/clusters/config_loader/cluster.py b/src/core/api/app/clusters/config_loader/cluster.py new file mode 100644 index 00000000..b5c6fe9e --- /dev/null +++ b/src/core/api/app/clusters/config_loader/cluster.py @@ -0,0 +1,701 @@ +import grp +import logging +import os +import pwd +from pathlib import Path + +from billing.models import Plan, Product +from clusters.adapters import all +from clusters.models import Cluster, ClusterUpgrade +from codegen.models import SQLHook, Template +from django.contrib.auth.models import Group +from django.db import transaction +from django.utils import timezone +from projects.git import test_git_connection +from projects.models import ( + ConnectionTemplate, + ConnectionType, + Environment, + ExtendedGroup, + Profile, + ProfileFile, + Project, + Release, + Repository, +) +from projects.models.repository import SSHKey, UserRepository +from users.models import Account, User + +from lib.dicts import pick_fields +from lib.networking import resolve_ip + +from ..external_resources.postgres import create_database_custom, create_read_only_user +from .base import BaseConfigLoader + +logger = logging.getLogger(__name__) + + +def read_profile_file(file_name): + file_path = Path(__file__).resolve().parent / "profile_files" / file_name + with open(file_path) as f: + return f.read() + + +class ClusterConfigLoader(BaseConfigLoader): + @classmethod + def load( + cls, + params: dict, + core_db_service_account_ro=None, + envs_to_not_bump=[], + pricing_model=None, + create_default_user=False, + req_user_confirm=False, + ): + """ + Loads params into the database and bumps envs + """ + + cluster_domain = params["domain"] + + account_data = params["account"] + account_name = account_data["name"] + account_slug = account_data["slug"] + account_owner_data = account_data.pop("owner") + account_owner_email = account_owner_data.pop("email") + + profiles_data = params.pop("profiles", []) + + projects_data = params.get("projects") + if projects_data: + # We use keys just to group projects data with their secrets + projects_data = list(projects_data.values()) + else: + if "project" in params: + projects_data = [params["project"]] + else: + projects_data = [] + + with transaction.atomic(): + Environment.objects.filter(cluster__domain=cluster_domain).update( + sync=False + ) + + owner, _ = User.objects.get_or_create( + email=account_owner_email, defaults=account_owner_data + ) + + account, _ = Account.objects.get_or_create( + slug=account_slug, defaults={"created_by": owner, "name": account_name} + ) + + for profile_data in profiles_data: + # Profile updates are tricky and can't be done through the UI. + # This, we will check for changes and ask if we want to update + # rather than assume. New profiles will be created if they + # don't already exist. + + profile = pick_fields(Profile, profile_data) + profile_name = profile["name"] + + # Make sure we want to update it. 
+ existing_profile = Profile.objects.filter(name=profile_name).first() + + if existing_profile: + logging.info(f"Updating profile {profile_name}") + cls._update_config( + existing_profile, + profile, + False, + "cluster-params.yaml", + req_user_confirm, + ) + existing_profile.save() + + else: + logging.info(f"Creating profile {profile_name}") + Profile.objects.create(**profile) + + for project_data in projects_data: + # We will no longer update project data from cluster-params, + # only create it. This will let us create new projects in + # an automated fashion if we need to (i.e. for testing) but + # it won't overwrite settings on a production environment. + + project_slug = project_data["slug"] + + logging.info( + f"Creating project {project_slug} if it doesn't already " "exist." + ) + + repo_data = project_data.pop("repository") + repo_git_url = repo_data["git_url"] + + groups_data = project_data.pop("groups", {}) + conn_templates_data = project_data.pop( + "connections", project_data.pop("connection_templates", {}) + ) + + repo, _ = Repository.objects.get_or_create( + git_url=repo_git_url.lower(), + defaults=pick_fields(Repository, repo_data), + ) + + project_data = pick_fields(Project, project_data) + project_data["account"] = account + project_data["repository"] = repo + project, _ = Project.objects.get_or_create( + slug=project_slug, defaults=project_data + ) + + cls._update_connection_templates(project, conn_templates_data) + cls._update_groups(account, project, groups_data) + + if "internal_dns_url" in params: + params["internal_dns_ip"] = resolve_ip(params["internal_dns_url"]) + + params["release"] = Release.objects.get(name=params["release"]) + + cluster = Cluster.objects.filter(domain=cluster_domain).first() + + if not cluster: + logging.info(f"Creating new cluster with domain {cluster_domain}") + + created = True + cluster = Cluster.objects.create(**pick_fields(Cluster, params)) + + else: + logging.info(f"Updating cluster with domain {cluster_domain}") + created = False + + cls._update_config( + cluster, + pick_fields(Cluster, params), + False, + "cluster-params.yaml", + req_user_confirm, + ) + + # Cluster save handler is run in _update_cluster_config, + # no need to run it again here. + + if core_db_service_account_ro: + cls._create_core_db_service_account_read_only( + cluster=cluster, + db_user=core_db_service_account_ro["username"], + db_pass=core_db_service_account_ro["password"], + ) + + cls._create_grafana_db_service_account(cluster=cluster) + cls._update_cluster_config( + cluster=cluster, params=params, req_user_confirm=req_user_confirm + ) + + cls._setup_base_data(account) + + if create_default_user: + owner.groups.set(Group.objects.all()) + password = os.environ.get("DEFAULT_SUPERUSER_PWD") + if not owner.check_password(password): + owner.set_password(password) + owner.save() + print(f"Superuser {owner.email} updated") + + cls.create_ssh_key(owner, repo, project) + + Environment.objects.filter(cluster__domain=cluster_domain).update(sync=True) + + logger.info(f"Cluster successfully {'created' if created else 'updated'}.") + + for env in Environment.objects.exclude(slug__in=envs_to_not_bump): + bumped = env.bump_release() + logger.info( + "Environment %s %s %s", + env.slug, + "bumped to" if bumped else "kept on", + env.release.name, + ) + + # TODO: Clean all this up, by moving it into a new `installer` app. 
+ cls._load_pricing_model(pricing_model, req_user_confirm) + + # Complete upgrade record + upgrade = ClusterUpgrade.objects.filter(cluster=cluster).order_by("-id").first() + if upgrade: + upgrade.finished_at = timezone.now() + upgrade.save() + + return cluster + + @classmethod + def _create_core_db_service_account_read_only( + cls, cluster: Cluster, db_user: str, db_pass: str + ): + credentials = create_read_only_user(db_user=db_user, db_pass=db_pass) + credentials.update( + {"description": "Read-only service account for core api database"} + ) + pg_service_account = {"postgres_core_ro": credentials} + cluster.service_account.update(pg_service_account) + + @classmethod + def _create_grafana_db_service_account(cls, cluster: Cluster): + if cluster.service_account.get("postgres_grafana"): + return + + credentials = create_database_custom(db_name="grafana") + credentials.update({"description": "Service account for Grafana database"}) + pg_service_account = {"postgres_grafana": credentials} + cluster.service_account.update(pg_service_account) + + def create_ssh_key(owner: User, repo: Repository, project: Project): + try: + private_key = os.environ.get("SUPER_USER_PRIVATE_KEY") + assert ( + private_key + ), "SUPER_USER_PRIVATE_KEY value not found. Did you run ./cli.py reveal_secrets?" + private_key = private_key.replace("\\n", "\n") + private_keys = SSHKey.objects.values_list("private", flat=True) + if private_key not in private_keys: + SSHKey.objects.new( + created_by=owner, associate=True, private=private_key + ) + print("SSH Key created") + else: + print("SSH key was already created") + + # Create known hosts with the proper permissions + # TODO: organize this code block. + known_hosts_path = Path("/home/abc/.ssh/known_hosts") + known_hosts_path.parent.mkdir(parents=True, exist_ok=True) + uid = pwd.getpwnam("abc").pw_uid + gid = grp.getgrnam("abc").gr_gid + os.chown(known_hosts_path.parent, uid, gid) + os.chmod(known_hosts_path.parent, 0o744) + # Test the repository and projects + try: + user_repository = UserRepository.objects.get( + repository=repo, user=owner + ) + err_string = ( + f"An error occured. The repository could not be tested. {repo}" + ) + response = test_git_connection( + data={"user_repository_id": user_repository.id} + ) + if response.status_code == 400: + print(f"{err_string}. {response.data['message']}") + else: + print("Git clone tested on git repository: ", repo.git_url) + os.chown(known_hosts_path, uid, gid) + os.chmod(known_hosts_path, 0o644) + except Exception: + print(err_string) + + try: + response = test_git_connection(data={"project_id": project.id}) + err_string_project = f"An error occured. Git clone could not be tested on the project: {project}" + if response.status_code == 400: + print(err_string_project) + else: + print("Git clone tested on project: ", project.slug) + except Exception: + print(err_string_project) + + except Exception as e: + print(f"SSH Key could not be created. {e}") + + @classmethod + def _load_pricing_model(cls, pricing_model, req_user_confirm: bool): + """Note - I am forcing req_user_confirm to False so as not to + require confirmation here, which is quite noisy. However, I didn't + want to take out all this code in case we want it later as I + work on reforming pricing. 
+ """ + + req_user_confirm = False + + if pricing_model: + for product_id, product in pricing_model["products"].items(): + db_product = Product.objects.filter(id=product_id).first() + + if not db_product: + product["id"] = product_id + Product.objects.create(**product) + + else: + cls._update_config( + db_product, product, False, "pricing.yaml", req_user_confirm + ) + db_product.save() + + for plan_slug, plan in pricing_model["plans"].items(): + db_plan = Plan.objects.filter(slug=plan_slug).first() + + if not db_plan: + plan["slug"] = plan_slug + Plan.objects.create(**plan) + + else: + cls._update_config( + db_plan, plan, False, "pricing.yaml", req_user_confirm + ) + db_plan.save() + + @classmethod + def _update_cluster_config( + cls, cluster: Cluster, params: dict, req_user_confirm: bool + ): + for name, adapter in all.EXTERNAL_ADAPTERS.items(): + try: + logger.info(f"Updating cluster default configuration for {name}...") + config = adapter.get_cluster_default_config( + cluster=cluster, source=params.get(adapter.config_attr(), {}) + ) + + # Just set it if it is blank + if ( + hasattr(cluster, adapter.config_attr()) + and getattr(cluster, adapter.config_attr()) != {} + ): + cls._update_config( + cluster, + {adapter.config_attr(): config}, + False, + "cluster-params.yaml", + req_user_confirm, + ) + + else: + setattr(cluster, adapter.config_attr(), config) + + except AttributeError: + pass + + cluster.save() + + @classmethod + def _update_groups(cls, account, project, groups_data): + """Updates identity groups of groups""" + if "admins" in groups_data: + group = ExtendedGroup.objects.get( + account=account, role=ExtendedGroup.Role.ROLE_ACCOUNT_ADMIN + ) + group.identity_groups = groups_data["admins"] + group.save() + if "developers" in groups_data: + group = ExtendedGroup.objects.get( + project=project, role=ExtendedGroup.Role.ROLE_PROJECT_DEVELOPER + ) + group.identity_groups = groups_data["developers"] + group.save() + if "viewers" in groups_data: + group = ExtendedGroup.objects.get( + project=project, role=ExtendedGroup.Role.ROLE_PROJECT_VIEWER + ) + group.identity_groups = groups_data["viewers"] + group.save() + if "sysadmins" in groups_data: + group = ExtendedGroup.objects.get( + project=project, role=ExtendedGroup.Role.ROLE_PROJECT_SYSADMIN + ) + group.identity_groups = groups_data["sysadmins"] + group.save() + + @classmethod + def _update_connection_templates(cls, project, conn_templates_data): + """Updates connections""" + ConnectionType.objects.create_defaults() + + for conn_name, conn_config in conn_templates_data.items(): + type_name = conn_config.pop("type") + conn_config["type"] = ConnectionType.objects.get(slug=type_name) + ConnectionTemplate.objects.get_or_create( + name=conn_name, project=project, defaults=conn_config + ) + + @classmethod + def _setup_base_data(cls, account): + ( + template_set_snowflake_user_public_key, + _, + ) = Template.objects.get_or_create( + slug="set_snowflake_user_public_key", + defaults={ + "name": "Set Snowflake user public key", + "description": "Sets the user RSA_PUBLIC_KEY in snowflake.\n" + "If user does not exist, it will throw an error.", + "content": "ALTER USER {{ user }} SET RSA_PUBLIC_KEY='{{ ssl_public_key }}';", + "context_type": Template.CONTEXT_TYPE_USER_CREDENTIAL, + "format": Template.FORMAT_SQL_SNOWFLAKE, + "enabled_for": [Template.USAGE_SQLHOOKS], + "created_by": None, + }, + ) + connection_overrides = { + "user": "SVC_DATACOVES", + "role": "SECURITYADMIN", + "password": "", # Fill it in by hand from the admin. 
TODO: Load from secrets? + } + SQLHook.objects.get_or_create( + account=account, + slug=template_set_snowflake_user_public_key.slug, + defaults={ + "name": template_set_snowflake_user_public_key.name, + "connection_overrides": connection_overrides, + "template": template_set_snowflake_user_public_key, + "trigger": SQLHook.TRIGGER_USER_CREDENTIAL_PRE_SAVE, + "connection_type": ConnectionType.objects.get( + slug=ConnectionType.TYPE_SNOWFLAKE + ), + "enabled": False, + "created_by": None, + }, + ) + + # Connection with custom username generated + Template.objects.update_or_create( + slug="connection-username-admin", + defaults={ + "name": "Connection username for admins", + "description": "Generates a connection username for admins using the pattern admin_.", + "content": "admin_{{ username }}", + "context_type": Template.CONTEXT_TYPE_USER, + "format": Template.FORMAT_NONE, + "enabled_for": [Template.USAGE_CONNECTION_TEMPLATES], + "created_by": None, + "updated_by": None, + }, + ) + + profile, _ = Profile.objects.update_or_create( + slug="default", + defaults={"name": "Default", "slug": "default", "created_by": None}, + ) + + profile.files.all().delete() + + dbt_profiles_tem, _ = Template.objects.update_or_create( + slug="dbt_profiles_yml", + defaults={ + "name": "dbt profiles yml", + "description": "Generates profiles.yml file required by dbt to connect to databases.", + "content": read_profile_file("dbt_profiles.yml"), + "context_type": Template.CONTEXT_TYPE_USER_CREDENTIALS, + "format": Template.FORMAT_YAML, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/config/.dbt/profiles.yml", + profile=profile, + template=dbt_profiles_tem, + override_existent=True, + ) + + dbt_deps_tem, _ = Template.objects.update_or_create( + slug="dbt_deps", + defaults={ + "name": "dbt deps", + "description": "Runs dbt deps on dbt project.", + "content": read_profile_file("run_dbt_deps.sh"), + "context_type": Template.CONTEXT_TYPE_NONE, + "format": Template.FORMAT_BASH, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/tmp/run_dbt_deps.sh", + profile=profile, + template=dbt_deps_tem, + execute=True, + ) + + # vscode templates + vscode_user_cfg_tem, _ = Template.objects.update_or_create( + slug="vscode-user-config", + defaults={ + "name": "vscode user config", + "description": "User Visual Studio Code configuration file", + "content": read_profile_file("vscode_user_settings.json"), + "context_type": Template.CONTEXT_TYPE_USER_CREDENTIALS, + "format": Template.FORMAT_JSON, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/config/data/User/settings.json", + profile=profile, + template=vscode_user_cfg_tem, + override_existent=True, + ) + + vscode_frontend_tem, _ = Template.objects.update_or_create( + slug="vscode-frontend", + defaults={ + "name": "vscode frontend", + "description": "Visual Studio Code frontend code file", + "content": read_profile_file("workbench.html"), + "context_type": Template.CONTEXT_TYPE_NONE, + "format": Template.FORMAT_HTML, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/app/code-server/lib/vscode/out/vs/code/browser/workbench/workbench.html", + profile=profile, + template=vscode_frontend_tem, + 
override_existent=True, + ) + + vscode_remote_cfg_tem, _ = Template.objects.update_or_create( + slug="vscode-remote-config", + defaults={ + "name": "vscode remote config", + "description": "Machine Visual Studio Code configuration file", + "content": read_profile_file("vscode_remote_settings.json"), + "context_type": Template.CONTEXT_TYPE_USER_CREDENTIALS, + "format": Template.FORMAT_JSON, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/config/data/Machine/settings.json", + profile=profile, + template=vscode_remote_cfg_tem, + override_existent=True, + ) + + # user settings templates + user_bashrc_tem, _ = Template.objects.update_or_create( + slug="user-bashrc", + defaults={ + "name": "user .bashrc", + "description": "Runs .bashrc on the system", + "content": read_profile_file(".bashrc"), + "context_type": Template.CONTEXT_TYPE_NONE, + "format": Template.FORMAT_BASH, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/config/.bashrc", + profile=profile, + template=user_bashrc_tem, + override_existent=True, + ) + + user_keybindings_tem, _ = Template.objects.update_or_create( + slug="user-keybindings", + defaults={ + "name": "user keybindings", + "description": "User keybindings configuration file", + "content": read_profile_file("keybindings.json"), + "context_type": Template.CONTEXT_TYPE_ENVIRONMENT, + "format": Template.FORMAT_JSON, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/config/data/User/keybindings.json", + profile=profile, + template=user_keybindings_tem, + override_existent=True, + ) + + gitconfig_template, _ = Template.objects.update_or_create( + slug="user-gitconfig", + defaults={ + "name": "user gitconfig", + "description": "User git configuration file", + "content": read_profile_file(".gitconfig"), + "context_type": Template.CONTEXT_TYPE_USER, + "format": Template.FORMAT_BASH, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/config/.gitconfig", + profile=profile, + template=gitconfig_template, + override_existent=True, + ) + + snowflake_toml_template, _ = Template.objects.update_or_create( + slug="snowflake-toml", + defaults={ + "name": "snowflake extension configuration file", + "description": "Snowflake extension TOML cfg file", + "content": read_profile_file("snowflake_extension.toml"), + "context_type": Template.CONTEXT_TYPE_USER_CREDENTIALS, + "format": Template.FORMAT_NONE, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/config/.snowflake/connections.toml", + profile=profile, + template=snowflake_toml_template, + override_existent=True, + permissions=ProfileFile.PERMISSION_600, + ) + + pre_commit_template, _ = Template.objects.update_or_create( + slug="precommit-hook", + defaults={ + "name": "pre-commit hook", + "description": "branch protection pre-commit hook", + "content": read_profile_file("pre-commit"), + "context_type": Template.CONTEXT_TYPE_ENVIRONMENT, + "format": Template.FORMAT_NONE, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/config/workspace/.git/hooks/pre-commit", + 
profile=profile, + template=pre_commit_template, + override_existent=True, + execute=True, + ) + + snowflake_config_toml_template, _ = Template.objects.update_or_create( + slug="snowflake-config-toml", + defaults={ + "name": "snowflake connector config file", + "description": "Snowflake connector config file", + "content": read_profile_file("snowflake_config.toml"), + "context_type": Template.CONTEXT_TYPE_USER_CREDENTIALS, + "format": Template.FORMAT_NONE, + "enabled_for": [Template.USAGE_PROFILE_FILES], + "created_by": None, + "updated_by": None, + }, + ) + ProfileFile.objects.create( + mount_path="/config/.snowflake/config.toml", + profile=profile, + template=snowflake_config_toml_template, + override_existent=True, + permissions=ProfileFile.PERMISSION_600, + ) diff --git a/src/core/api/app/clusters/config_loader/environment.py b/src/core/api/app/clusters/config_loader/environment.py new file mode 100644 index 00000000..b264bcd8 --- /dev/null +++ b/src/core/api/app/clusters/config_loader/environment.py @@ -0,0 +1,267 @@ +import logging + +from clusters import workspace +from clusters.adapters.airbyte import AirbyteAdapter +from clusters.adapters.airflow import AirflowAdapter +from clusters.adapters.code_server import CodeServerAdapter +from clusters.adapters.superset import SupersetAdapter +from clusters.models import Cluster +from django.conf import settings +from projects.models import ( + ConnectionTemplate, + Environment, + Profile, + Project, + Release, + ServiceCredential, +) + +from .base import BaseConfigLoader, DiffEnum + +logger = logging.getLogger(__name__) + + +class EnvironmentConfigLoader(BaseConfigLoader): + @classmethod + def load( + cls, + env_slug: str, + env_config: dict, + service_config={}, + run_async=True, + req_user_confirm=False, + ): + """ + Creates or updates an environment and related models from dict variables + """ + credentials_data = env_config.pop("service_credentials", {}) + profile = env_config.pop("profile", "default") + release = Release.objects.get(name=env_config["release"]) + project_slug = env_config.pop("project", None) + cluster_domain = env_config.pop("domain", None) + + if project_slug: + project = Project.objects.get(slug=project_slug) + else: + project = Project.objects.order_by("-updated_at").first() + + if cluster_domain: + cluster = Cluster.objects.get(domain=cluster_domain) + else: + cluster = Cluster.objects.order_by("-updated_at").first() + + env_config["slug"] = env_slug + env_config["project"] = project + env_config["cluster"] = cluster + env_config["release"] = release + env_config["docker_config"] = settings.DEFAULT_DOCKER_CONFIG + env_config["profile"] = Profile.objects.get(slug=profile) + + try: + created = False + env = Environment.objects.get(slug=env_slug) + except Environment.DoesNotExist: + created = True + env = Environment(slug=env_slug) + + cls._update_config( + model=env, + env_config=env_config, + created=created, + source=f"environments/{env.slug}/environment.yaml", + req_user_confirm=req_user_confirm, + ) + + # Disable workspace synchronization while updating env. 
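+        # It is re-enabled, and workspace.sync invoked explicitly, at the end of load().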
+ env.sync = False + env.save() + + enabled_services = [ + svc + for svc, svc_config in env_config.get("services", {}).items() + if svc_config.get("enabled") + ] + + setup_services = { + settings.SERVICE_CODE_SERVER: { + "setup": cls._setup_code_server, + "adapter": CodeServerAdapter, + }, + settings.SERVICE_AIRBYTE: { + "setup": cls._setup_airbyte, + "adapter": AirbyteAdapter, + }, + settings.SERVICE_AIRFLOW: { + "setup": cls._setup_airflow, + "adapter": AirflowAdapter, + }, + settings.SERVICE_SUPERSET: { + "setup": cls._setup_superset, + "adapter": SupersetAdapter, + }, + } + + # Validate if service config should be applied. + for service in setup_services.keys(): + if service in enabled_services: + apply_changes = DiffEnum.APPLY_CHANGES + # Environment already exists + if not created: + apply_changes = cls._validate_config_diff( + model=env, + adapter=setup_services[service]["adapter"], + source_config=service_config[service], + source=f"environments/{env.slug}/environment.yaml", + req_user_confirm=req_user_confirm, + ) + + if apply_changes in (DiffEnum.APPLY_CHANGES, DiffEnum.NO_CHANGES): + setup_services[service]["setup"]( + env, cluster, service_config.get(service, {}) + ) + + cls._update_credentials(env, credentials_data) + + # This dance is to run workspace.sync synchronously now instead of async + # through signals and celery tasks. We do this so exceptions are logged + # to the console during install. + env.save() + Environment.objects.filter(id=env.id).update(sync=True) + env.sync = True + workspace.sync(env, "register_environment.handle", run_async) + logger.info( + "Environment %s successfully %s.", env, "created" if created else "updated" + ) + + services = Environment.objects.get(id=env.id).services + unmet_preconditions = [ + item + for service in services.values() + for item in service.get("unmet_precondition", []) + ] + + if unmet_preconditions: + logger.info("unmet preconditions %s", unmet_preconditions) + + return env + + @classmethod + def _setup_code_server( + cls, env: Environment, cluster: Cluster, code_server_config: dict + ): + """Code server""" + env.code_server_config = CodeServerAdapter.get_default_config( + env, code_server_config + ) + + @classmethod + def _setup_airbyte(cls, env: Environment, cluster: Cluster, airbyte_config: dict): + """Airbyte setup""" + env.airbyte_config = AirbyteAdapter.get_default_config(env, airbyte_config) + cls._setup_external_db( + env.airbyte_config, + cluster, + cluster.airbyte_config["db"]["external"], + airbyte_config, + settings.SERVICE_AIRBYTE, + ) + cls._setup_external_logs( + env.airbyte_config, + cluster, + cluster.airbyte_config["logs"]["external"], + airbyte_config, + settings.SERVICE_AIRBYTE, + ) + + @classmethod + def _setup_airflow(cls, env: Environment, cluster: Cluster, airflow_config: dict): + """Airflow setup""" + env.airflow_config = AirflowAdapter.get_default_config(env, airflow_config) + cls._setup_external_db( + env.airflow_config, + cluster, + cluster.airflow_config["db"]["external"], + airflow_config, + settings.SERVICE_AIRFLOW, + ) + + cls._setup_external_logs( + env.airflow_config, + cluster, + cluster.airflow_config["logs"]["external"], + airflow_config, + settings.SERVICE_AIRFLOW, + ) + + @classmethod + def _setup_superset(cls, env: Environment, cluster: Cluster, superset_config: dict): + """Superset setup""" + env.superset_config = SupersetAdapter.get_default_config(env, superset_config) + cls._setup_external_db( + env.superset_config, + cluster, + cluster.superset_config["db"]["external"], + 
superset_config, + settings.SERVICE_SUPERSET, + ) + + @classmethod + def _update_credentials(cls, environment: Environment, credentials_data: dict): + for name, config in credentials_data.items(): + service, name = name.split(".") + connection_template_name = config.pop("connection", None) + if not connection_template_name: + connection_template_name = config.pop("connection_template") + config["connection_template"] = ConnectionTemplate.objects.get( + project=environment.project, name=connection_template_name + ) + ServiceCredential.objects.update_or_create( + environment=environment, service=service, name=name, defaults=config + ) + + @classmethod + def _setup_external_db( + cls, + env_config, + cluster: Cluster, + db_external: bool, + config: dict, + service: str, + ): + if db_external: + db_config = config.get("db", {}) + if not db_config and not cluster.has_dynamic_db_provisioning(): + raise ValueError( + f"{service.capitalize()} db configuration is missing but expected since" + f" cluster.{service}_config['db']['external'] = True." + ) + + if "db" in env_config: + env_config["db"]["external"] = True + else: + env_config["db"] = {"external": True} + env_config["db"].update(db_config) + + @classmethod + def _setup_external_logs( + cls, + env_config, + cluster: Cluster, + logs_external: bool, + config: dict, + service: str, + ): + if logs_external: + logs_config = config.get("logs", {}) + if not logs_config and not cluster.has_dynamic_blob_storage_provisioning(): + raise ValueError( + f"{service.capitalize()} logs configuration is missing but expected" + f" since cluster.{service}_config['logs']['external'] = True." + ) + + if "logs" in env_config: + env_config["logs"]["external"] = True + else: + env_config["logs"] = {"external": True} + + env_config["logs"].update(logs_config) diff --git a/src/core/api/app/clusters/config_loader/profile_files/.bashrc b/src/core/api/app/clusters/config_loader/profile_files/.bashrc new file mode 100644 index 00000000..7372604d --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/.bashrc @@ -0,0 +1,14 @@ +#!/bin/bash +export PS1="\[\033[1;36m\]\w \[\033[0m\]\$ " + +export PATH=/config/.local/bin:$PATH + +source /usr/share/bash-completion/completions/git + +alias code=code-server + + +if [[ -f "/config/.bash_custom" ]]; then + source "/config/.bash_custom" +fi + diff --git a/src/core/api/app/clusters/config_loader/profile_files/.gitconfig b/src/core/api/app/clusters/config_loader/profile_files/.gitconfig new file mode 100644 index 00000000..62375f6e --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/.gitconfig @@ -0,0 +1,20 @@ +[user] +name = {{name}} +email = {{email}} + +[pull] +rebase = false + +[fetch] +prune = true + +[diff] +colorMoved = zebra + +[alias] +br = !git branch +co = !git checkout +l = !git log +st = !git status +po = !git pull origin main +prune-branches = !git remote prune origin && git branch -vv | grep ': gone]' | awk '{print $1}' | xargs -r git branch -D diff --git a/src/core/api/app/clusters/config_loader/profile_files/dbt_profiles.yml b/src/core/api/app/clusters/config_loader/profile_files/dbt_profiles.yml new file mode 100644 index 00000000..f36ab269 --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/dbt_profiles.yml @@ -0,0 +1,67 @@ +{% if connections|length > 0%} +{{ environment.dbt_profile }}: + outputs: + {% set ns = namespace(main_target='') %} + {% for con in connections %} + {% if ns.main_target == '' or 'dev' not in ns.main_target %} + {% set ns.main_target 
= con.slug %} + {% endif %} + {{ con.slug }}: + {% if con.type == 'snowflake' %} + type: snowflake + account: {{ con.account }} + warehouse: {{ con.warehouse }} + database: {{ con.database }} + role: {{ con.role }} + schema: {{ con.schema }} + user: {{ con.user }} + {% if con.ssl_public_key %} + private_key_path: /config/.ssl/{{ con.slug }}-private.pem + {% else %} + password: "{{ con.password | escape_quotes }}" + {% endif %} + {% if 'mfa_protected' in con and con.mfa_protected %} + authenticator: username_password_mfa + {% endif %} + threads: 16 + {% elif con.type == 'redshift' %} + type: redshift + host: {{ con.host }} + user: {{ con.user }} + password: "{{ con.password | escape_quotes}}" + port: 5439 + dbname: {{ con.database }} + schema: {{ con.schema }} + threads: 8 + keepalives_idle: 240 + connect_timeout: 900 + {% elif con.type == 'databricks' %} + type: databricks + schema: {{ con.schema }} + host: {{ con.host }} + http_path: {{ con.http_path }} + token: {{ con.token }} + threads: 8 + {% elif con.type == 'bigquery' %} + type: bigquery + method: service-account-json + project: {{ con.keyfile_json.project_id }} + dataset: {{ con.dataset }} + threads: 8 + keyfile_json: + type: {{ con.keyfile_json.type }} + project_id: {{ con.keyfile_json.project_id }} + private_key_id: {{ con.keyfile_json.private_key_id }} + private_key: {{ con.keyfile_json.private_key | tojson }} + client_email: {{ con.keyfile_json.client_email }} + client_id: {{ con.keyfile_json.client_id }} + auth_uri: {{ con.keyfile_json.auth_uri }} + token_uri: {{ con.keyfile_json.token_uri }} + auth_provider_x509_cert_url: {{ con.keyfile_json.auth_provider_x509_cert_url }} + client_x509_cert_url: {{ con.keyfile_json.client_x509_cert_url }} + {% else %} + # TODO: {{ con.type }} connection type not supported yet + {% endif %} + {% endfor %} + target: {{ ns.main_target }} +{% endif %} \ No newline at end of file diff --git a/src/core/api/app/clusters/config_loader/profile_files/keybindings.json b/src/core/api/app/clusters/config_loader/profile_files/keybindings.json new file mode 100644 index 00000000..3ef0fc7d --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/keybindings.json @@ -0,0 +1,27 @@ +[ + { + "key": "ctrl+alt+w", + "command": "workbench.action.closeActiveEditor" + }, + { + "key": "cmd+alt+w", + "command": "workbench.action.closeActiveEditor" + }, + { + "key": "ctrl+shift+l", + "command": "editor.action.selectHighlights", + "when": "editorFocus" + }, + {% if release_profile == "dbt-redshift" %} + { + "key": "cmd+enter", + "command": "sqltools.executeCurrentQuery", + "when": "editorTextFocus && editorLangId == 'sql'" + }, + { + "key": "ctrl+enter", + "command": "sqltools.executeCurrentQuery", + "when": "editorTextFocus && editorLangId == 'sql'" + }, + {% endif %} +] \ No newline at end of file diff --git a/src/core/api/app/clusters/config_loader/profile_files/pre-commit b/src/core/api/app/clusters/config_loader/profile_files/pre-commit new file mode 100644 index 00000000..24bb73d2 --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/pre-commit @@ -0,0 +1,9 @@ +#!/bin/sh +{% if protected_branch %} +current_branch="$(git branch --show-current)" +if [ "$current_branch" = "{{ protected_branch }}" ]; then + echo "ERROR: $current_branch branch is protected. Please create a feature branch." 
+ exit 1 +fi +{% endif %} +exit 0 diff --git a/src/core/api/app/clusters/config_loader/profile_files/run_dbt_deps.sh b/src/core/api/app/clusters/config_loader/profile_files/run_dbt_deps.sh new file mode 100644 index 00000000..ae64d307 --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/run_dbt_deps.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e + +MODULES="$DBT_HOME"/dbt_modules +PACKAGES="$DBT_HOME"/dbt_packages + +if [ -d "${MODULES}" ] || [ -d "${PACKAGES}" ]; then + echo "dbt packages already installed." +else + if [ -d "$DBT_HOME" ]; then + echo "Installing dbt packages..." + cd $DBT_HOME + sudo -u abc bash -c "/config/.local/bin/dbt deps" + else + echo "Dbt deps not ran as DBT_HOME does not exist (yet)." + fi +fi diff --git a/src/core/api/app/clusters/config_loader/profile_files/snowflake_config.toml b/src/core/api/app/clusters/config_loader/profile_files/snowflake_config.toml new file mode 100644 index 00000000..eccd77cf --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/snowflake_config.toml @@ -0,0 +1,10 @@ +{% if environment.release_profile == "dbt-snowflake" %} +{% set snowflake_cons = connections | selectattr('type', 'equalto', 'snowflake') | list %} +{% set first_snowflake = snowflake_cons[0].name if snowflake_cons else '' %} +default_connection_name = "{{ first_snowflake }}" + +[cli.logs] +save_logs = true +path = "/config/.snowflake/logs" +level = "info" +{% endif %} \ No newline at end of file diff --git a/src/core/api/app/clusters/config_loader/profile_files/snowflake_extension.toml b/src/core/api/app/clusters/config_loader/profile_files/snowflake_extension.toml new file mode 100644 index 00000000..86e87e97 --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/snowflake_extension.toml @@ -0,0 +1,23 @@ +{% if environment.release_profile == "dbt-snowflake" %} +{% for con in connections %} +{% if con.type == 'snowflake' %} +[{{con.name}}] +account = "{{ con.account }}" +user = "{{ con.user }}" +warehouse = "{{ con.warehouse }}" +database = "{{ con.database }}" +schema = "{{ con.schema }}" +role = "{{ con.role }}" +{% if con.ssl_public_key %} +authenticator = "SNOWFLAKE_JWT" +private_key_path = "/config/.ssl/{{ con.name }}-private.pem" +{% elif 'mfa_protected' in con and con.mfa_protected %} +authenticator = "username_password_mfa" +password = "{{ con.password | escape_quotes }}" +{% else %} +authenticator = "SNOWFLAKE" +password = "{{ con.password | escape_quotes }}" +{% endif %} +{% endif %} +{% endfor %} +{% endif %} diff --git a/src/core/api/app/clusters/config_loader/profile_files/vscode_remote_settings.json b/src/core/api/app/clusters/config_loader/profile_files/vscode_remote_settings.json new file mode 100644 index 00000000..46ba1d6e --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/vscode_remote_settings.json @@ -0,0 +1,3 @@ +{ + "snowflake.connectionsConfigFile": "/config/.snowflake/connections.toml" +} diff --git a/src/core/api/app/clusters/config_loader/profile_files/vscode_user_settings.json b/src/core/api/app/clusters/config_loader/profile_files/vscode_user_settings.json new file mode 100644 index 00000000..40b0e7c1 --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/vscode_user_settings.json @@ -0,0 +1,579 @@ +{% if environment.dbt_home_path %} +{% set dbt_home_subpath = "/" ~ environment.dbt_home_path %} +{% else %} +{% set dbt_home_subpath = "" %} +{% endif %} +{% if environment.release_profile == "dbt-snowflake" %} +{% set sql_file_association = 
"snowflake-sql" %} +{% else %} +{% set sql_file_association = "sql" %} +{% endif %} +{ + "betterStatusBar.commands": [ + { + "id": "20", + "label": "✔️ lint current", + "command": "sqlfluff lint ${file}", + "color": "white", + "alignment": 1, + "priority": 10, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "30", + "label": "✔️ Run checks", + "command": "pre-commit run", + // "command": "pre-commit run --from-ref origin/main --to-ref HEAD", + "color": "lightgray", + "alignment": 1, + "priority": 10, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "40", + "label": "Generate Properties", + "command": "dbt-coves generate properties -s ${fileBasenameNoExtension}", + "color": "yellow", + // 1 = left side | 2 = right side + "alignment": 1, + // Determines the location of the button. Higher priority is farther to the left. + "priority": 8, + // Setting this to null defaults to the command value (which you could then use to add a keyboard shortcut to) as the tooltip text. + "tooltip": "generate yml file for current file", + // Change this to false if you only want the button to show in the dropdown. + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + // Setting a name allows the terminal to be re-used by other commands with the same name. + "name": "datacoves" + } + }, + { + "id": "45", + "label": "Generate Sources", + "command": "dbt-coves generate sources", + "color": "pink", + // 1 = left side | 2 = right side + "alignment": 1, + // Determines the location of the button. Higher priority is farther to the left. + "priority": 8, + // Setting this to null defaults to the command value (which you could then use to add a keyboard shortcut to) as the tooltip text. + "tooltip": "generate staging files", + // Change this to false if you only want the button to show in the dropdown. + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + // Setting a name allows the terminal to be re-used by other commands with the same name. + "name": "datacoves" + } + }, + { + "id": "47", + "label": "Generate Airflow Dag for YML", + "command": "dbt-coves generate airflow-dags --yaml-path ${file}", + "color": "pink", + "alignment": 1, + "priority": 8, + "tooltip": "generate python dag from current yml template", + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + // Setting a name allows the terminal to be re-used by other commands with the same name. + "name": "datacoves" + } + }, + { + "id": "50", + "label": "Generate Metadata", + "command": "dbt-coves generate metadata", + "color": "yellow", + // 1 = left side | 2 = right side + "alignment": 1, + // Determines the location of the button. Higher priority is farther to the left. + "priority": 8, + // Setting this to null defaults to the command value (which you could then use to add a keyboard shortcut to) as the tooltip text. 
+ "tooltip": "generate metadata file for generating models and properties", + // Change this to false if you only want the button to show in the dropdown. + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + // Setting a name allows the terminal to be re-used by other commands with the same name. + "name": "datacoves" + } + }, + { + // The label makes adding the command to the dropdown easier. + "id": "90", + "label": "▶️ Run current", + "command": "dbt run --select ${fileBasenameNoExtension}", + "color": "lightgreen", + // 1 = left side | 2 = right side + "alignment": 1, + // Determines the location of the button. Higher priority is farther to the left. + "priority": 9, + // Setting this to null defaults to the command value (which you could then use to add a keyboard shortcut to) as the tooltip text. + "tooltip": "dbt run current file", + // Change this to false if you only want the button to show in the dropdown. + "showButton": true, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + // Setting a name allows the terminal to be re-used by other commands with the same name. + "name": "datacoves" + } + }, + { + "id": "92", + "label": "▶️ Run Upstream models", + "command": "dbt run --select +${fileBasenameNoExtension}", + "color": "lightgreen", + "alignment": 1, + "priority": 10, + "tooltip": "dbt run models upstream of current file", + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + // Setting a name allows the terminal to be re-used by other commands with the same name. 
+ "name": "datacoves" + } + }, + { + "id": "94", + "label": "▶️ Run Downstream models", + "command": "dbt run --select ${fileBasenameNoExtension}+", + "color": "lightgreen", + "alignment": 1, + "priority": 10, + "tooltip": "dbt run models downstream of current file", + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "96", + "label": "▶️ Run Upstream/Downstream models", + "command": "dbt run --select +${fileBasenameNoExtension}+", + "color": "lightgreen", + "alignment": 1, + "priority": 10, + "tooltip": "dbt run models upstream and downstream of current file", + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "100", + "label": "🗳️ Get Production metadata (for state modified)", + "command": "/config/workspace/automate/dbt/get_artifacts.sh", + "color": "white", + "alignment": 1, + "priority": 1, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "105", + "label": "📝 Generate dbt Docs", + "command": "dbt docs generate", + "color": "white", + "alignment": 1, + "priority": 1, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "120", + "label": "⏭ Build all models and ther tests", + "command": "dbt build", + "color": "white", + "alignment": 1, + "priority": 0, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "122", + "label": "⏭ Build Upstream models and their tests", + "command": "dbt build --select +${fileBasenameNoExtension}", + "color": "white", + "alignment": 1, + "priority": 0, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "124", + "label": "⏭ Build Downstream models and their tests", + "command": "dbt build --select ${fileBasenameNoExtension}+", + "color": "white", + "alignment": 1, + "priority": 0, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "126", + "label": "⏭ Build Upstream/Downstream models and their tests", + "command": "dbt build --select +${fileBasenameNoExtension}+", + "color": "white", + "alignment": 1, + "priority": 0, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "130", + "label": "⏭ Build all models changed", + "command": "dbt build -s state:modified --state logs", + "color": "white", + "alignment": 1, + "priority": 0, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + 
}, + { + "id": "140", + "label": "☑︎ Test current model", + "command": "dbt test --select ${fileBasenameNoExtension}", + "color": "white", + "alignment": 1, + "priority": 0, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "142", + "label": "☑︎ Test all models", + "command": "dbt test", + "color": "white", + "alignment": 1, + "priority": 0, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "144", + "label": "☑︎ Test Upstream models", + "command": "dbt test --select +${fileBasenameNoExtension}", + "color": "white", + "alignment": 1, + "priority": 0, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "146", + "label": "☑︎ Test Downstream models", + "command": "dbt test --select ${fileBasenameNoExtension}+", + "color": "white", + "alignment": 1, + "priority": 0, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "datacoves" + } + }, + { + "id": "148", + "label": "☑︎ Test Upstream/Downstream models", + "command": "dbt test --select +${fileBasenameNoExtension}+", + "color": "white", + "alignment": 1, + "priority": 0, + "tooltip": null, + "showButton": false, + "terminal": { + "cwd": "/config/workspace{{ dbt_home_subpath }}", + "singleInstance": true, + "clear": true, + "focus": false, + "name": "bash2" + } + } + ], + "betterStatusBar.defaultColor": "statusBar.foreground", + "betterStatusBar.dropdowns": [ + { + "id": "Dropdown", + "label": "More...", + // The id or the label can be used to show the commands. + "commands": [ + "40", + "45", + "47", + "50", + "90", + "92", + "94", + "96", + "100", + "105", + "20", + "30", + "120", + "122", + "124", + "122", + "124", + "126", + "130", + "140", + "142", + "144", + "146", + "148" + ], + "tooltip": "Additional Commands" + } + ], + "betterStatusBar.loadNpmCommands": false, + // Set this to null to reload on configuration change in settings or this file. + "betterStatusBar.reloadButton": null, + "cSpell.checkOnlyEnabledFileTypes": true, + "cSpell.enableFiletypes": [ + "yaml", + "markdown", + "!python" + ], + "csv-edit.initiallyFixedRowsTop": 1, + "csv-edit.readOption_hasHeader": "true", + "datacovesCopilot.defaultModel": "gpt-4.1-mini", + "datacovesCopilot.presets": [ + { + "name": "Document dbt model", + "prompt": "Document the following dbt model. Add the description wherever it is missing and do not ovverride existing descriptions. Return the result as a code block" + }, + { + "name": "Explain dbt model", + "prompt": "What does this dbt model do?" + }, + { + "name": "Explain dbt macro", + "prompt": "What does this dbt macro do?" + }, + { + "name": "Add descriptions to CSV", + "prompt": "Add descriptions to this csv file for each column" + }, + ], + "datacovesCopilot.systemPrompt": "You are a senior dbt analytics engineer. You are given a CODEBASE, a SELECTION, and a USER QUERY. You are to answer the USER QUERY based on the CODEBASE and the SELECTION. 
If only an explanation is needed, respond with a clear and digestible answer in short paragraphs or bullet lists that a lay person can understand", + "dbt.dbtPythonPath": "/usr/bin/python", + "dbt.terminal.name": "datacoves", + "dbt.terminal.output": "none", + "dbt.terminal.preserveFocus": true, + "dbt.terminal.readonly": false, + "editor.hover.delay": 1500, + "editor.minimap.enabled": false, + "editor.rulers": [140], + "extensions.autoCheckUpdates": false, + "extensions.autoUpdate": false, + "files.associations": { + "*.sql": "{{ sql_file_association }}", + "**/{models,macros,analysis,snapshots,tests}/**/*.sql": "jinja-sql" + }, + "files.insertFinalNewline": true, + "files.trimFinalNewlines": true, + "files.trimTrailingWhitespace": true, + "git.autofetch": true, + {% if environment.protected_branch != "" %} + "git.branchProtection": "{{ environment.protected_branch }}", + {% endif %} + "git.enableSmartCommit": true, + "git.postCommitCommand": "sync", + "redhat.telemetry.enabled": false, + "remote.autoForwardPortsSource": "process", + "security.workspace.trust.enabled": false, + {% if environment.release_profile == "dbt-snowflake" %} + "snowflake.showExecuteAboveStatement.enabled": false, + {% endif %} + "sqlfluff.config": "/config/workspace{{ dbt_home_subpath }}/.sqlfluff", + "sqlfluff.dbtInterface.enabled": true, + "sqlfluff.dbtInterface.host": "localhost", + "sqlfluff.dbtInterface.port": 8581, + "sqlfluff.executablePath": "/config/.local/bin/sqlfluff", + "sqlfluff.experimental.format.executeInTerminal": true, + "sqlfluff.ignoreParsing": false, + "sqlfluff.linter.run": "onType", + {% if environment.release_profile == "dbt-redshift" %} + "sqltools.connections": [ + {% for con in connections %} + {% if con.type == 'redshift' %} + { + "pgOptions": { + "ssl": true + }, + "previewLimit": 50, + "server": "{{con.host}}", + "port": 5439, + "driver": "PostgreSQL", + "name": "{{con.name}}", + "database": "{{con.database}}", + "username": "{{con.user}}" + }{% if not loop.last %}, + {% endif %} + + {% endif %} + {% endfor %} + ], + "sqltools.results.location": "current", + "sqltools.results.reuseTabs": "connection", + {% endif %} + "telemetry.enableTelemetry": false, + "terminal.integrated.cursorBlinking": true, + "terminal.integrated.cwd": "/config/workspace{{ dbt_home_subpath }}", + "terminal.integrated.gpuAcceleration": "off", + "terminal.integrated.tabs.title": "datacoves", + "window.commandCenter": false, + "workbench.layoutControl.enabled": false, + "workbench.colorTheme": "Default Dark+", + "workbench.editorAssociations": { + "*.ipynb": "jupyter.notebook.ipynb" + }, + "workbench.editor.enablePreviewFromCodeNavigation": true, + "workbench.editor.enablePreviewFromQuickOpen": true, + "workbench.sideBar.location": "left", + "workbench.startupEditor": "none", + "yaml.schemas": { + "https://raw.githubusercontent.com/dbt-labs/dbt-jsonschema/main/schemas/latest/dbt_project-latest.json": [ + "{{ dbt_home_subpath }}/dbt_project.yml" + ], + "https://raw.githubusercontent.com/dbt-labs/dbt-jsonschema/main/schemas/latest/dbt_yml_files-latest.json": [ + "{{ dbt_home_subpath }}/**/*.yml", + "!{{ dbt_home_subpath }}/*.yml", + "!{{ dbt_home_subpath }}/.dbt_coves/*.yml" + ], + "https://raw.githubusercontent.com/dbt-labs/dbt-jsonschema/main/schemas/latest/selectors-latest.json": [ + "{{ dbt_home_subpath }}/selectors.yml" + ], + "https://raw.githubusercontent.com/dbt-labs/dbt-jsonschema/main/schemas/latest/packages-latest.json": [ + "{{ dbt_home_subpath }}/packages.yml" + ] + }, + // Deprecated (remove once 
dbt-osmosis is completely unused) + "dbt.osmosisCheck": false, + "dbt.server.createServer": false, + "dbt.server.persistentHealthCheck": false, + "dbt.versionCheck": "neither", + "sqlfluff.osmosis.enabled": true, + "sqlfluff.osmosis.host": "localhost", + "sqlfluff.osmosis.port": 8581, + // end deprecation warning +} diff --git a/src/core/api/app/clusters/config_loader/profile_files/workbench.html b/src/core/api/app/clusters/config_loader/profile_files/workbench.html new file mode 100644 index 00000000..ba718fa6 --- /dev/null +++ b/src/core/api/app/clusters/config_loader/profile_files/workbench.html @@ -0,0 +1,180 @@ +{% raw %} + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +{% endraw %} diff --git a/src/core/api/app/clusters/debug_toolbar.py b/src/core/api/app/clusters/debug_toolbar.py new file mode 100644 index 00000000..a9c15ec7 --- /dev/null +++ b/src/core/api/app/clusters/debug_toolbar.py @@ -0,0 +1,5 @@ +from django.conf import settings + + +def show_toolbar(request): + return settings.DEBUG diff --git a/src/core/api/app/clusters/external_resources/__init__.py b/src/core/api/app/clusters/external_resources/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/clusters/external_resources/efs.py b/src/core/api/app/clusters/external_resources/efs.py new file mode 100644 index 00000000..f4ece24e --- /dev/null +++ b/src/core/api/app/clusters/external_resources/efs.py @@ -0,0 +1,11 @@ +from projects.models import Environment + + +def create_filesystem(env: Environment): + provisioner = env.cluster.efs_provisioner + global_efs = provisioner.get("global") + if not global_efs: + raise NotImplementedError( + "Dynamic creation of EFS filesystems not implemented yet." + ) + return global_efs diff --git a/src/core/api/app/clusters/external_resources/postgres.py b/src/core/api/app/clusters/external_resources/postgres.py new file mode 100644 index 00000000..2a39a58a --- /dev/null +++ b/src/core/api/app/clusters/external_resources/postgres.py @@ -0,0 +1,234 @@ +import secrets +from os import environ + +import psycopg2 +from projects.models import Environment + + +def create_read_only_user(db_user: str, db_pass: str, schema="public"): + db_host = environ.get("DB_HOST") + db_port = environ.get("DB_PORT", 5432) + db_name = environ.get("DB_NAME") + + conn = psycopg2.connect( + host=db_host, + port=db_port, + user=environ.get("DB_USER"), + dbname=db_name, + password=environ.get("DB_PASS"), + ) + conn.set_session(autocommit=True) + + try: + with conn.cursor() as cur: + # Create user + cur.execute(f"SELECT 1 FROM pg_user WHERE usename = '{db_user}';") + user_exists = cur.fetchone() + if user_exists: + cur.execute(f"ALTER USER {db_user} WITH PASSWORD '{db_pass}';") + else: + cur.execute(f"CREATE USER {db_user} WITH LOGIN PASSWORD '{db_pass}';") + + # Read-only permissions + cur.execute( + f"GRANT CONNECT ON DATABASE {db_name} TO {db_user};" + f"GRANT SELECT ON ALL TABLES IN SCHEMA {schema} TO {db_user};" + f"ALTER DEFAULT PRIVILEGES IN SCHEMA {schema} GRANT SELECT ON TABLES TO {db_user};" + ) + + return { + "host": db_host, + "port": db_port, + "user": db_user, + "password": db_pass, + "database": db_name, + } + + finally: + conn.close() + + +def create_read_only_user_for_service( + env: Environment, service_name: str, schema="public" +): + config_attr = f"{service_name.lower().replace('-', '_')}_config" + env_data_db = getattr(env, config_attr)["db"] + + if env_data_db.get("external", False): + provisioner = 
env.cluster.postgres_db_provisioner + else: + provisioner = env_data_db + + db_host = provisioner["host"] + db_port = provisioner.get("port", 5432) + db_name = env_data_db["database"] + provisioning_user = provisioner["user"] + + conn = psycopg2.connect( + host=db_host, + port=db_port, + user=provisioning_user, + dbname=db_name, + password=provisioner.get("pass", provisioner.get("password")), + ) + conn.set_session(autocommit=True) + + user_created = f"{env.slug}_{service_name}_ro" + password_create = secrets.token_urlsafe(12) + try: + with conn.cursor() as cur: + # Create user + cur.execute(f"SELECT 1 FROM pg_user WHERE usename = '{user_created}';") + user_exists = cur.fetchone() + if user_exists: + cur.execute( + f"ALTER USER {user_created} WITH PASSWORD '{password_create}';" + ) + else: + cur.execute( + f"CREATE USER {user_created} WITH LOGIN PASSWORD '{password_create}';" + ) + + # Read-only permissions + cur.execute( + f"GRANT CONNECT ON DATABASE {db_name} TO {user_created};" + f"GRANT SELECT ON ALL TABLES IN SCHEMA {schema} TO {user_created};" + f"ALTER DEFAULT PRIVILEGES IN SCHEMA {schema} GRANT SELECT ON TABLES TO {user_created};" + ) + + return { + "host": db_host, + "port": db_port, + "user": user_created, + "password": password_create, + "database": db_name, + } + + finally: + conn.close() + + +def create_database(env: Environment, db_name: str, can_create_db=False) -> dict: + provisioner = env.cluster.postgres_db_provisioner + db_host = provisioner["host"] + db_port = provisioner.get("port", 5432) + provisioning_user = provisioner["user"] + provisioning_user_db = provisioner.get("db", provisioning_user) + db_name = f"{env.slug}_{db_name}" + + conn = psycopg2.connect( + host=db_host, + port=db_port, + user=provisioning_user, + dbname=provisioning_user_db, + password=provisioner["pass"], + ) + conn.set_session(autocommit=True) + + db_pass = secrets.token_urlsafe(12) + db_user = db_name + + try: + with conn.cursor() as cur: + cur.execute(f"SELECT 1 FROM pg_user WHERE usename = '{db_user}';") + user_exists = cur.fetchone() + if user_exists: + cur.execute(f"ALTER USER {db_user} WITH PASSWORD '{db_pass}';") + else: + cur.execute(f"CREATE USER {db_user} PASSWORD '{db_pass}';") + + cur.execute( + f"GRANT {db_user} TO {provisioning_user};" + ) # Making provisioning user member of new role + + cur.execute(f"SELECT 1 FROM pg_database WHERE datname = '{db_name}';") + db_exists = cur.fetchone() + if db_exists: + cur.execute(f"ALTER DATABASE {db_name} OWNER TO {db_user};") + else: + cur.execute(f"CREATE DATABASE {db_name} OWNER {db_user};") + + cur.execute( + f"REVOKE CONNECT ON DATABASE {db_name} FROM PUBLIC;" + f"GRANT CONNECT ON DATABASE {db_name} TO {db_user};" + f"GRANT CONNECT ON DATABASE {db_name} TO {provisioning_user};" + ) + + if can_create_db: + cur.execute( + f"ALTER user {db_user} CREATEDB;" + f"GRANT CONNECT ON DATABASE postgres TO {db_user};" + ) + + finally: + conn.close() + + return { + "host": db_host, + "port": db_port, + "user": db_user, + "password": db_pass, + "database": db_name, + } + + +def create_database_custom(db_name: str, can_create_db=False) -> dict: + db_host = environ.get("DB_HOST") + db_port = environ.get("DB_PORT", 5432) + provisioning_user = environ.get("DB_USER") + provisioning_db_name = environ.get("DB_NAME") + + conn = psycopg2.connect( + host=db_host, + port=db_port, + user=provisioning_user, + dbname=provisioning_db_name, + password=environ.get("DB_PASS"), + ) + conn.set_session(autocommit=True) + + db_pass = secrets.token_urlsafe(12) + db_user = 
db_name + + try: + with conn.cursor() as cur: + cur.execute(f"SELECT 1 FROM pg_user WHERE usename = '{db_user}';") + user_exists = cur.fetchone() + if user_exists: + cur.execute(f"ALTER USER {db_user} WITH PASSWORD '{db_pass}';") + else: + cur.execute(f"CREATE USER {db_user} PASSWORD '{db_pass}';") + + cur.execute( + f"GRANT {db_user} TO {provisioning_user};" + ) # Making provisioning user member of new role + + cur.execute(f"SELECT 1 FROM pg_database WHERE datname = '{db_name}';") + db_exists = cur.fetchone() + if db_exists: + cur.execute(f"ALTER DATABASE {db_name} OWNER TO {db_user};") + else: + cur.execute(f"CREATE DATABASE {db_name} OWNER {db_user};") + + cur.execute( + f"REVOKE CONNECT ON DATABASE {db_name} FROM PUBLIC;" + f"GRANT CONNECT ON DATABASE {db_name} TO {db_user};" + f"GRANT CONNECT ON DATABASE {db_name} TO {provisioning_user};" + ) + + if can_create_db: + cur.execute( + f"ALTER user {db_user} CREATEDB;" + f"GRANT CONNECT ON DATABASE postgres TO {db_user};" + ) + + finally: + conn.close() + + return { + "host": db_host, + "port": db_port, + "user": db_user, + "password": db_pass, + "database": db_name, + } diff --git a/src/core/api/app/clusters/external_resources/s3.py b/src/core/api/app/clusters/external_resources/s3.py new file mode 100644 index 00000000..5b989536 --- /dev/null +++ b/src/core/api/app/clusters/external_resources/s3.py @@ -0,0 +1,27 @@ +from projects.models import Environment + +from .s3_provisioner import S3Provisioner + + +def create_bucket(env: Environment, name: str): + provisioner = S3Provisioner(env, name) + if not provisioner.bucket_exists(): + provisioner.create_bucket() + if not provisioner.user_exists(): + provisioner.create_user() + provisioner.update_user_policy() + + # Delete existing key pairs + user_keys = provisioner.list_keys() + for key in user_keys: + provisioner.delete_key(key.id) + + # Create new key + key_pair = provisioner.create_key() + return { + "access_key": key_pair.id, + "secret_key": key_pair.secret, + "s3_log_bucket": provisioner.resource_name(), + "s3_log_bucket_region": provisioner.region, + "backend": "s3", + } diff --git a/src/core/api/app/clusters/external_resources/s3_provisioner.py b/src/core/api/app/clusters/external_resources/s3_provisioner.py new file mode 100644 index 00000000..81877d12 --- /dev/null +++ b/src/core/api/app/clusters/external_resources/s3_provisioner.py @@ -0,0 +1,171 @@ +import json +import logging + +import boto3 +from botocore.exceptions import ClientError + +logger = logging.getLogger(__name__) + + +class S3Provisioner: + def __init__(self, env, name): + provisioner = env.cluster.s3_provisioner + session = boto3.Session( + aws_access_key_id=provisioner["aws_access_key_id"], + aws_secret_access_key=provisioner["aws_secret_access_key"], + ) + self.cluster_name = env.cluster.domain.split(".")[0] + self.region = provisioner["region"] + self.env_slug = env.slug + self.name = name + self.iam = session.resource("iam") + self.s3 = session.resource("s3") + + def resource_name(self): + return f"datacoves-{self.cluster_name}-{self.env_slug}-{self.name}" + + def bucket_exists(self): + """ + Returns true if the bucket exists. + """ + try: + self.s3.meta.client.head_bucket(Bucket=self.resource_name()) + bucket_exists = True + except ClientError: + # The bucket does not exist or you have no access. 
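+ # (head_bucket raises ClientError for both 404 Not Found and 403 Forbidden,
+ # so either case is treated here as "bucket needs provisioning".)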
+ bucket_exists = False + return bucket_exists + + def create_bucket(self): + """Creates S3 bucket""" + name = self.resource_name() + config = {"Bucket": name} + if self.region != "us-east-1": + config["CreateBucketConfiguration"] = {"LocationConstraint": self.region} + try: + bucket = self.s3.create_bucket(**config) + logger.info("Created bucket %s.", name) + except ClientError: + logger.exception(f"Couldn't create bucket {name}") + raise + else: + return bucket + + def user_exists(self): + """ + Returns if user exists + """ + try: + user = self.iam.User(self.resource_name()) + user.load() + return True + except ClientError: + return False + + def create_user(self): + """ + Creates a user. By default, a user has no permissions or access keys. + """ + try: + user = self.iam.create_user(UserName=self.resource_name()) + logger.info("Created user %s.", user.name) + return user + except ClientError: + logger.exception("Couldn't create user %s.", self.resource_name()) + raise + + def delete_user(self): + """ + Deletes a user. Before a user can be deleted, all associated resources, + such as access keys and policies, must be deleted or detached. + + :param user_name: The name of the user. + """ + try: + self.iam.User(self.resource_name()).delete() + logger.info("Deleted user %s.", self.resource_name()) + except ClientError: + logger.exception("Couldn't delete user %s.", self.resource_name()) + raise + + def update_user_policy(self): + name = self.resource_name() + user_policy = self.iam.UserPolicy(name, name) + user_policy.put( + PolicyDocument=json.dumps( + { + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "ListObjectsInBucket", + "Effect": "Allow", + "Action": ["s3:ListBucket"], + "Resource": [f"arn:aws:s3:::{name}"], + }, + { + "Sid": "AllObjectActions", + "Effect": "Allow", + "Action": "s3:*Object", + "Resource": [f"arn:aws:s3:::{name}/*"], + }, + ], + } + ) + ) + + # Access keys + def create_key(self): + """ + Creates an access key for the specified user. Each user can have a + maximum of two keys. + + :param user_name: The name of the user. + :return: The created access key. + """ + try: + key_pair = self.iam.User(self.resource_name()).create_access_key_pair() + logger.info( + "Created access key pair for %s. Key ID is %s.", + key_pair.user_name, + key_pair.id, + ) + except ClientError: + logger.exception( + "Couldn't create access key pair for %s.", self.resource_name() + ) + raise + else: + return key_pair + + def delete_key(self, key_id): + """ + Deletes a user's access key. + + :param user_name: The user that owns the key. + :param key_id: The ID of the key to delete. + """ + try: + key = self.iam.AccessKey(self.resource_name(), key_id) + key.delete() + logger.info("Deleted access key %s for %s.", key.id, self.resource_name()) + except ClientError: + logger.exception( + "Couldn't delete key %s for %s", key_id, self.resource_name() + ) + raise + + def list_keys(self): + """ + Lists the keys owned by the specified user. + + :param user_name: The name of the user. + :return: The list of keys owned by the user. 
+ """ + try: + keys = list(self.iam.User(self.resource_name()).access_keys.all()) + logger.info("Got %s access keys for %s.", len(keys), self.resource_name()) + except ClientError: + logger.exception("Couldn't get access keys for %s.", self.resource_name()) + raise + else: + return keys diff --git a/src/core/api/app/clusters/management/__init__.py b/src/core/api/app/clusters/management/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/clusters/management/commands/cluster_config.py b/src/core/api/app/clusters/management/commands/cluster_config.py new file mode 100644 index 00000000..c39ac3a7 --- /dev/null +++ b/src/core/api/app/clusters/management/commands/cluster_config.py @@ -0,0 +1,35 @@ +import json +import sys + +from clusters.models.cluster import Cluster +from django.core.management.base import BaseCommand + + +class Command(BaseCommand): + help = "Return cluster configuration of the cluster." + + def add_arguments(self, parser): + parser.add_argument("--config-name", help="Config section.", required=True) + + def get_value(self, data, key, default=None): + if isinstance(data, dict): + return data.get(key, default) + else: + return getattr(data, key, default) + + def handle(self, *args, **options): + config_sections = options.get("config_name").split(".") + cluster = Cluster.objects.current().first() + data = getattr(cluster, config_sections[0], None) + if not data: + sys.stdout.write(json.dumps({})) + return + + if len(config_sections) > 1: + for key in config_sections[1:]: + data = self.get_value(data=data, key=key) + if not data: + sys.stdout.write(json.dumps({})) + return + + sys.stdout.write(json.dumps(data)) diff --git a/src/core/api/app/clusters/management/commands/dump_account_resources.py b/src/core/api/app/clusters/management/commands/dump_account_resources.py new file mode 100644 index 00000000..a78a4f8f --- /dev/null +++ b/src/core/api/app/clusters/management/commands/dump_account_resources.py @@ -0,0 +1,29 @@ +from clusters import workspace +from django.core.management.base import BaseCommand +from projects.models import Environment + +from lib.config_files import print_yamls + + +class Command(BaseCommand): + help = "Dump all the workspace resources that workspace.sync() creates as yaml." + + def add_arguments(self, parser): + parser.add_argument( + "--env", + help="Workspace / environment slug to dump.", + default="dev123", + ) + + def handle(self, *args, **options): + env_slug = options["env"] + env = Environment.objects.filter(slug=env_slug).first() + if not env: + self.stdout.write("Environment not found.") + + account = env.project.account + + res, config_hashes = workspace.gen_account_resources(account, env) + account_obj = workspace.gen_account(account, env, config_hashes) + res.append(account_obj) + print_yamls(res) diff --git a/src/core/api/app/clusters/management/commands/dump_workspace_resources.py b/src/core/api/app/clusters/management/commands/dump_workspace_resources.py new file mode 100644 index 00000000..b80f321d --- /dev/null +++ b/src/core/api/app/clusters/management/commands/dump_workspace_resources.py @@ -0,0 +1,31 @@ +import logging + +from clusters import workspace +from django.core.management.base import BaseCommand +from projects.models import Environment + +from lib.config_files import print_yamls + +logger = logging.getLogger(__name__) + + +class Command(BaseCommand): + help = "Dump all the workspace resources that workspace.sync() creates as yaml." 
+ + def add_arguments(self, parser): + parser.add_argument( + "--env", + help="Workspace / environment slug to dump.", + default="dev123", + ) + + def handle(self, *args, **options): + env_slug = options["env"] + env = Environment.objects.filter(slug=env_slug).first() + if not env: + logger.info("Environment not found.") + return + + res, config_hashes = workspace.gen_workspace_resources(env) + workspace_res = workspace.gen_workspace(env, config_hashes) + res.append(workspace_res) + print_yamls(res) diff --git a/src/core/api/app/clusters/management/commands/generate_account_slugs.py b/src/core/api/app/clusters/management/commands/generate_account_slugs.py new file mode 100644 index 00000000..f511cda0 --- /dev/null +++ b/src/core/api/app/clusters/management/commands/generate_account_slugs.py @@ -0,0 +1,13 @@ +import json +import sys + +from django.core.management.base import BaseCommand +from users.models import Account + + +class Command(BaseCommand): + help = "Prints active datacoves account slugs." + + def handle(self, *args, **options): + accounts = [account.slug for account in Account.objects.active_accounts()] + sys.stdout.write(json.dumps(accounts)) diff --git a/src/core/api/app/clusters/management/commands/generate_cluster_oidc.py b/src/core/api/app/clusters/management/commands/generate_cluster_oidc.py new file mode 100644 index 00000000..5c236813 --- /dev/null +++ b/src/core/api/app/clusters/management/commands/generate_cluster_oidc.py @@ -0,0 +1,23 @@ +import json +import sys + +from clusters.adapters import EnvironmentAdapter +from django.core.management.base import BaseCommand, CommandParser + + +class Command(BaseCommand): + help = "Generate the cluster OIDC configuration for a service." + + def add_arguments(self, parser: CommandParser): + parser.add_argument("--name", help="Service name", required=True) + parser.add_argument("--subdomain", help="Subdomain to generate", required=True) + parser.add_argument( + "--path", help="Path to generate the OIDC config for", required=True + ) + + def handle(self, *args, **options): + name = options["name"] + subdomain = options["subdomain"] + path = options["path"] + data: dict = EnvironmentAdapter.get_cluster_oidc_config(name, subdomain, path) + sys.stdout.write(json.dumps(data)) diff --git a/src/core/api/app/clusters/management/commands/generate_service_account.py b/src/core/api/app/clusters/management/commands/generate_service_account.py new file mode 100644 index 00000000..c4660207 --- /dev/null +++ b/src/core/api/app/clusters/management/commands/generate_service_account.py @@ -0,0 +1,42 @@ +import json +import secrets +import sys + +from clusters.models import Cluster +from django.core.management.base import BaseCommand, CommandParser +from rest_framework.authtoken.models import Token +from users.models import User + + +class Command(BaseCommand): + help = "Registers a service account for REST API core services."
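+ # Illustrative usage (the email value is an example):
+ # python manage.py generate_service_account --email-sa core-api@example.com
+ # Prints a JSON document with username, password, token and description, and
+ # stores the same data under cluster.service_account["core_api"].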
+ + def add_arguments(self, parser: CommandParser): + parser.add_argument("--email-sa", help="Service account email", required=True) + + def handle(self, *args, **options): + sa_email = options["email_sa"] + sa_description = "Service account for Rest API core services" + sa_user, _ = User.objects.get_or_create( + email=sa_email, + defaults={ + "is_service_account": True, + "name": sa_description, + }, + ) + password = secrets.token_urlsafe(12) + sa_user.set_password(password) + sa_user.save() + + sa_token, _ = Token.objects.get_or_create(user=sa_user) + sa_data = { + "username": sa_email, + "password": password, + "token": sa_token.key, + "description": sa_description, + } + cluster = Cluster.objects.first() + cluster.service_account.update({"core_api": sa_data}) + cluster.save() + + sys.stdout.write(json.dumps(sa_data)) diff --git a/src/core/api/app/clusters/management/commands/register_cluster.py b/src/core/api/app/clusters/management/commands/register_cluster.py new file mode 100644 index 00000000..69f742e3 --- /dev/null +++ b/src/core/api/app/clusters/management/commands/register_cluster.py @@ -0,0 +1,54 @@ +from pathlib import Path + +from clusters.config_loader.cluster import ClusterConfigLoader +from django.core.management.base import BaseCommand + +from datacoves.settings import to_bool +from lib.config_files import load_file + + +class Command(BaseCommand): + help = "Registers a new cluster by reading a yaml config file." + + def add_arguments(self, parser): + parser.add_argument( + "--config", + help="Path to the configuration directory.", + default="/tmp/config", + ) + parser.add_argument("--envs", help="Comma separated list of environment slugs.") + parser.add_argument( + "--create-default-user", + default="false", + help="Create default user.", + ) + parser.add_argument( + "--user-confirm", + default="true", + help="Requires user confirmation.", + ) + + def handle(self, *args, **options): + envs_arg = options.get("envs") + env_slugs = envs_arg.split(",") if envs_arg else [] + config_dir = Path(options["config"]) + params = load_file(config_dir / "cluster-params") + params_secret = load_file(config_dir / "cluster-params.secret.yaml") + core_db_service_account_ro = params_secret.get( + "core_db_service_account_read_only" + ) + + pricing_yaml = config_dir / "pricing.yaml" + pricing_model = load_file(pricing_yaml) if pricing_yaml.exists() else None + + create_default_user = to_bool(options["create_default_user"]) + req_user_confirm = to_bool(options["user_confirm"]) + + ClusterConfigLoader.load( + params=params, + envs_to_not_bump=env_slugs, + pricing_model=pricing_model, + create_default_user=create_default_user, + core_db_service_account_ro=core_db_service_account_ro, + req_user_confirm=req_user_confirm, + ) diff --git a/src/core/api/app/clusters/management/commands/save_service_account.py b/src/core/api/app/clusters/management/commands/save_service_account.py new file mode 100644 index 00000000..a534814f --- /dev/null +++ b/src/core/api/app/clusters/management/commands/save_service_account.py @@ -0,0 +1,26 @@ +import base64 +import json +import sys + +from clusters.models import Cluster +from django.core.management.base import BaseCommand, CommandParser + + +class Command(BaseCommand): + help = "Save or update a service account on Cluster model." 
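+ # Illustrative usage (service name and payload are examples):
+ # python manage.py save_service_account \
+ # --json-data-b64 "$(echo -n '{"airbyte": {"username": "svc", "token": "..."}}' | base64 -w0)"
+ # The decoded JSON is merged into cluster.service_account via dict.update().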
+ + def add_arguments(self, parser: CommandParser): + parser.add_argument( + "--json-data-b64", help="Service account data", required=True + ) + + def handle(self, *args, **options): + json_data_b64 = options["json_data_b64"] + json_data_b64_bytes = json_data_b64.encode("utf-8") + json_data = base64.b64decode(json_data_b64_bytes).decode("utf-8") + data = json.loads(json_data) + cluster = Cluster.objects.first() + cluster.service_account.update(data) + cluster.save() + + sys.stdout.write("The service account was saved successfully.") diff --git a/src/core/api/app/clusters/management/commands/sync_periodic_tasks.py b/src/core/api/app/clusters/management/commands/sync_periodic_tasks.py new file mode 100644 index 00000000..055f59a8 --- /dev/null +++ b/src/core/api/app/clusters/management/commands/sync_periodic_tasks.py @@ -0,0 +1,25 @@ +from celery import current_app +from django.core.management.base import BaseCommand +from django_celery_beat.models import PeriodicTask + + +class Command(BaseCommand): + help = "Sync periodic tasks with CELERY_BEAT_SCHEDULE" + + def handle(self, *args, **kwargs): + # Tasks defined in CELERY_BEAT_SCHEDULE + defined_tasks = set(current_app.conf.beat_schedule.keys()) + defined_tasks.add("celery.backend_cleanup") + + # Tasks currently stored in the database + existing_tasks = PeriodicTask.objects.values_list("name", flat=True) + + # Tasks to delete + tasks_to_delete = set(existing_tasks) - defined_tasks + + # Deleting tasks that are no longer in CELERY_BEAT_SCHEDULE + if tasks_to_delete: + PeriodicTask.objects.filter(name__in=tasks_to_delete).delete() + self.stdout.write(self.style.SUCCESS(f"Periodic tasks deleted: {tasks_to_delete}")) + else: + self.stdout.write(self.style.SUCCESS("There are no tasks to delete.")) diff --git a/src/core/api/app/clusters/management/commands/upgrade_cluster.py b/src/core/api/app/clusters/management/commands/upgrade_cluster.py new file mode 100644 index 00000000..01f5e966 --- /dev/null +++ b/src/core/api/app/clusters/management/commands/upgrade_cluster.py @@ -0,0 +1,35 @@ +import logging + +from clusters.models import Cluster, ClusterUpgrade +from django.core.management.base import BaseCommand, CommandParser +from django.db import connection + +logger = logging.getLogger(__name__) + + +class Command(BaseCommand): + help = "Registers a cluster upgrade." + + def add_arguments(self, parser: CommandParser): + parser.add_argument("--release-name", help="Release name", required=True) + parser.add_argument("--triggered-by", help="Upgrade author", required=True) + + def handle(self, *args, **options): + if "clusters_clusterupgrade" not in connection.introspection.table_names(): + logger.info( + "ClusterUpgrade table not created yet, aborting cluster upgrade registration."
+ ) + return + + cluster = Cluster.objects.current().only("pk").first() + if cluster: + release_name = options["release_name"] + triggered_by = options["triggered_by"] + ClusterUpgrade.objects.create( + cluster_id=cluster.id, + release_name=release_name, + triggered_by=triggered_by, + ) + logger.info("Cluster upgrade registered successfully.") + else: + logger.info("Cluster upgrade not registered since no cluster record found.") diff --git a/src/core/api/app/clusters/metrics/__init__.py b/src/core/api/app/clusters/metrics/__init__.py new file mode 100644 index 00000000..55448ddc --- /dev/null +++ b/src/core/api/app/clusters/metrics/__init__.py @@ -0,0 +1,2 @@ +from .metrics import gen_prometheus_metrics # noqa +from .middleware import DatacovesPrometheusMetricMiddleware # noqa diff --git a/src/core/api/app/clusters/metrics/metrics.py b/src/core/api/app/clusters/metrics/metrics.py new file mode 100644 index 00000000..cea97a2c --- /dev/null +++ b/src/core/api/app/clusters/metrics/metrics.py @@ -0,0 +1,206 @@ +import subprocess +from enum import Enum + +from django.core.cache import cache +from django.db.models import F +from django_prometheus.conf import NAMESPACE +from projects.models.environment import Environment +from prometheus_client import Info +from users.models.account import Account + +from lib.utils import Timer, cache_result + +NS = NAMESPACE or "datacoves" + + +class MetricsCacheKeyEnum(Enum): + """Enum for cache keys used in metrics generation.""" + + ACCOUNT_INFO = "datacoves_prometheus_metrics_account_info" + ENVIRONMENT_INFO = "datacoves_prometheus_metrics_environment_info" + HELM_CHART_INFO = "datacoves_prometheus_metrics_helm_chart_info" + + +class Metrics: + _instance = None + + @classmethod + def get_instance(cls): + if not cls._instance: + cls._instance = cls() + return cls._instance + + def register_metric(self, metric_cls, name, documentation, labelnames=(), **kwargs): + return metric_cls(name, documentation, labelnames=labelnames, **kwargs) + + def __init__(self, *args, **kwargs): + self.register() + + def register(self): + self.account_info = self.register_metric( + Info, + "account", + "Datacoves account details.", + labelnames=( + "name", + "slug", + "developer_licenses", + "remaining_trial_days", + "is_active", + "is_on_trial", + "is_subscribed", + ), + namespace=NS, + ) + self.environment_info = self.register_metric( + Info, + "environment", + "Datacoves environment details.", + labelnames=( + "slug", + "name", + "update_strategy", + "account_name", + "account_slug", + "project_name", + "project_slug", + "release_name", + ), + namespace=NS, + ) + self.helm_chart_info = self.register_metric( + Info, + "helm_chart", + "Datacoves Helm details.", + labelnames=( + "name", + "ns", + "revision", + "updated", + "status", + "chart", + "app_version", + ), + namespace=NS, + ) + + def clear(self): + # In these cases we want to clean up old records. 
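+ # Info metrics keep every label combination they have been given, so clearing
+ # before re-populating from the cache prevents deleted accounts or environments
+ # from lingering in the /metrics output.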
+ self.account_info.clear() + self.environment_info.clear() + self.helm_chart_info.clear() + + def get_prometheus_metric_cached( + self, metric: Info, cache_key: MetricsCacheKeyEnum + ): + metrics = cache.get(cache_key.value) + if metrics: + for m in metrics: + metric.labels(**m) + + +@cache_result(key_prefix=MetricsCacheKeyEnum.ACCOUNT_INFO.value) +def _gen_account_info() -> list: + """Get accounts details + + Args: + metric (Info): Metric object + """ + accounts = Account.objects.only( + "name", + "slug", + "developer_licenses", + "created_by", + "trial_ends_at", + "deactivated_at", + "subscription", + ) + + metrics = [] + for account in accounts: + labels = { + "name": account.name, + "slug": account.slug, + "developer_licenses": account.developer_licenses, + "remaining_trial_days": account.remaining_trial_days, + "is_active": account.is_active, + "is_on_trial": account.is_on_trial, + "is_subscribed": account.is_subscribed, + } + metrics.append(labels) + + return metrics + + +@cache_result(key_prefix=MetricsCacheKeyEnum.ENVIRONMENT_INFO.value) +def _gen_environment_info() -> list: + """Get environments details + + Args: + metric (Info): Metric object + """ + envs = ( + Environment.objects.only( + "slug", + "name", + "update_strategy", + "project__account__name", + "project__account__slug", + "project__name", + "project__slug", + "release__name", + ) + .annotate( + account_name=F("project__account__name"), + account_slug=F("project__account__slug"), + project_name=F("project__name"), + project_slug=F("project__slug"), + release_name=F("release__name"), + ) + .values( + "slug", + "name", + "update_strategy", + "account_name", + "account_slug", + "project_name", + "project_slug", + "release_name", + ) + ) + + metrics = [env for env in envs] + return metrics + + +@cache_result(key_prefix=MetricsCacheKeyEnum.HELM_CHART_INFO.value) +def _gen_helm_chart_info() -> list: + """Get helm chart details + + Args: + metric (Info): Metric object + """ + kwargs = {} + kwargs["stdout"] = subprocess.PIPE + kwargs["stderr"] = subprocess.PIPE + + output = subprocess.run(["helm", "list", "-A"], **kwargs) + output = output.stdout.decode("utf-8").split("\n") + lines = [] + for line in output[:-1]: + item = line.split("\t") + lines.append(list(map(lambda x: x.strip(), item))) + + headers = list(map(lambda x: x.lower().replace(" ", "_"), lines[0])) + # Overwrite to avoid default origin namespace + idx = headers.index("namespace") + headers[idx] = "ns" + metrics = [dict(zip(headers, line)) for line in lines[1:]] + return metrics + + +def gen_prometheus_metrics(): + with Timer("datacoves.metrics.gen_prometheus_metrics"): + _ = _gen_account_info() + _ = _gen_environment_info() + _ = _gen_helm_chart_info() diff --git a/src/core/api/app/clusters/metrics/middleware.py b/src/core/api/app/clusters/metrics/middleware.py new file mode 100644 index 00000000..dfa97c8c --- /dev/null +++ b/src/core/api/app/clusters/metrics/middleware.py @@ -0,0 +1,45 @@ +from django.http import HttpResponseForbidden +from rest_framework.authentication import TokenAuthentication + +from .metrics import Metrics as Mec +from .metrics import MetricsCacheKeyEnum + + +class DatacovesPrometheusMetricMiddleware: + metrics_cls = Mec + + def __init__(self, get_response): + self.get_response = get_response + self.metrics = self.metrics_cls.get_instance() + self.auth = TokenAuthentication() + + def __call__(self, request): + if request.path == "/metrics": + auth_header = request.META.get("HTTP_AUTHORIZATION", "") + + # Change 'Bearer' instead of 'Token' 
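+ # DRF's TokenAuthentication only accepts the "Token <key>" scheme, while the
+ # Prometheus scrape config typically sends "Bearer <key>", so the header is
+ # rewritten before authenticate() runs.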
+ if auth_header.startswith("Bearer "): + request.META["HTTP_AUTHORIZATION"] = auth_header.replace( + "Bearer", "Token", 1 + ) + + user_auth_tuple = self.auth.authenticate(request) + if user_auth_tuple is None: + return HttpResponseForbidden("Invalid token") + + self.metrics.clear() + + self.metrics.get_prometheus_metric_cached( + metric=self.metrics.account_info, + cache_key=MetricsCacheKeyEnum.ACCOUNT_INFO, + ) + self.metrics.get_prometheus_metric_cached( + metric=self.metrics.environment_info, + cache_key=MetricsCacheKeyEnum.ENVIRONMENT_INFO, + ) + self.metrics.get_prometheus_metric_cached( + metric=self.metrics.helm_chart_info, + cache_key=MetricsCacheKeyEnum.HELM_CHART_INFO, + ) + + return self.get_response(request) diff --git a/src/core/api/app/clusters/metrics/signals.py b/src/core/api/app/clusters/metrics/signals.py new file mode 100644 index 00000000..5a63891b --- /dev/null +++ b/src/core/api/app/clusters/metrics/signals.py @@ -0,0 +1,22 @@ +from django.core.cache import cache +from django.db.models.signals import post_delete, post_save +from django.dispatch import receiver +from projects.models.environment import Environment +from users.models import Account + +from .metrics import MetricsCacheKeyEnum + + +@receiver(post_save, sender=Account) +@receiver(post_delete, sender=Account) +def clear_account_cache(sender, **kwargs): + cache.delete(MetricsCacheKeyEnum.ACCOUNT_INFO.value) + cache.delete(MetricsCacheKeyEnum.ENVIRONMENT_INFO.value) + cache.delete(MetricsCacheKeyEnum.HELM_CHART_INFO.value) + + +@receiver(post_save, sender=Environment) +@receiver(post_delete, sender=Environment) +def clear_environment_cache(sender, **kwargs): + cache.delete(MetricsCacheKeyEnum.ENVIRONMENT_INFO.value) + cache.delete(MetricsCacheKeyEnum.HELM_CHART_INFO.value) diff --git a/src/core/api/app/clusters/migrations/0001_initial.py b/src/core/api/app/clusters/migrations/0001_initial.py new file mode 100644 index 00000000..2ad9c070 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0001_initial.py @@ -0,0 +1,43 @@ +# Generated by Django 3.2.6 on 2022-03-03 15:15 + +import core.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='Cluster', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('domain', models.CharField(max_length=253, unique=True)), + ('provider', models.CharField(choices=[('eks', 'EKS (Amazon)'), ('gke', 'GKE (Google)'), ('aks', 'AKS (Azure)'), ('kind', 'Kind (local)')], default='kind', max_length=20)), + ('cert_manager_issuer', models.CharField(blank=True, max_length=253, null=True)), + ('external_dns_url', models.URLField(blank=True, null=True)), + ('extra_images', models.JSONField(blank=True, default=list, null=True)), + ('airbyte_logs_external', models.BooleanField(default=False)), + ('airbyte_logs_provisioner', core.fields.EncryptedJSONField(default=dict)), + ('airbyte_db_external', models.BooleanField(default=False)), + ('airbyte_db_provisioner', core.fields.EncryptedJSONField(default=dict)), + ('airflow_logs_external', models.BooleanField(default=False)), + ('airflow_logs_provisioner', core.fields.EncryptedJSONField(default=dict)), + ('airflow_db_external', models.BooleanField(default=False)), + ('airflow_db_provisioner', 
core.fields.EncryptedJSONField(default=dict)), + ('superset_db_external', models.BooleanField(default=False)), + ('superset_db_provisioner', core.fields.EncryptedJSONField(default=dict)), + ('fluentd_store_external', models.BooleanField(default=False)), + ('fluentd_store_provisioner', core.fields.EncryptedJSONField(default=dict)), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/src/core/api/app/clusters/migrations/0002_cluster_kubernetes_version.py b/src/core/api/app/clusters/migrations/0002_cluster_kubernetes_version.py new file mode 100644 index 00000000..494fd7cc --- /dev/null +++ b/src/core/api/app/clusters/migrations/0002_cluster_kubernetes_version.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-03-10 13:49 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0001_initial'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='kubernetes_version', + field=models.CharField(default='1.20.7', max_length=40), + preserve_default=False, + ), + ] diff --git a/src/core/api/app/clusters/migrations/0003_cluster_features_enabled.py b/src/core/api/app/clusters/migrations/0003_cluster_features_enabled.py new file mode 100644 index 00000000..06a025c6 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0003_cluster_features_enabled.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-05-12 16:52 + +import clusters.models.cluster +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0002_cluster_kubernetes_version'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='features_enabled', + field=models.JSONField(blank=True, default=clusters.models.cluster.default_features_enabled, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0004_auto_20220517_1305.py b/src/core/api/app/clusters/migrations/0004_auto_20220517_1305.py new file mode 100644 index 00000000..2f2edf07 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0004_auto_20220517_1305.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-05-17 13:05 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0003_cluster_features_enabled'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='internal_dns_ip', + field=models.CharField(blank=True, max_length=15, null=True), + ), + migrations.AddField( + model_name='cluster', + name='internal_dns_url', + field=models.URLField(blank=True, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0005_auto_20220517_1326.py b/src/core/api/app/clusters/migrations/0005_auto_20220517_1326.py new file mode 100644 index 00000000..9e525638 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0005_auto_20220517_1326.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-05-17 13:26 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0004_auto_20220517_1305'), + ] + + operations = [ + migrations.AlterField( + model_name='cluster', + name='external_dns_url', + field=models.CharField(blank=True, max_length=253, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='internal_dns_url', + field=models.CharField(blank=True, max_length=253, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0006_cluster_release_channel.py 
b/src/core/api/app/clusters/migrations/0006_cluster_release_channel.py new file mode 100644 index 00000000..0c844841 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0006_cluster_release_channel.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-05-19 00:21 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0005_auto_20220517_1326'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='release_channel', + field=models.CharField(default='edge', max_length=20), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0007_auto_20220531_1706.py b/src/core/api/app/clusters/migrations/0007_auto_20220531_1706.py new file mode 100644 index 00000000..be432a77 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0007_auto_20220531_1706.py @@ -0,0 +1,48 @@ +# Generated by Django 3.2.6 on 2022-05-31 17:06 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0006_cluster_release_channel'), + ] + + operations = [ + migrations.RemoveField( + model_name='cluster', + name='airbyte_db_provisioner', + ), + migrations.RemoveField( + model_name='cluster', + name='airbyte_logs_provisioner', + ), + migrations.RemoveField( + model_name='cluster', + name='airflow_db_provisioner', + ), + migrations.RemoveField( + model_name='cluster', + name='airflow_logs_provisioner', + ), + migrations.RemoveField( + model_name='cluster', + name='fluentd_store_provisioner', + ), + migrations.RemoveField( + model_name='cluster', + name='superset_db_provisioner', + ), + migrations.AddField( + model_name='cluster', + name='blob_storage_provisioner', + field=core.fields.EncryptedJSONField(default=dict, editable=True), + ), + migrations.AddField( + model_name='cluster', + name='postgres_db_provisioner', + field=core.fields.EncryptedJSONField(default=dict, editable=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0008_auto_20220531_1949.py b/src/core/api/app/clusters/migrations/0008_auto_20220531_1949.py new file mode 100644 index 00000000..a546b73f --- /dev/null +++ b/src/core/api/app/clusters/migrations/0008_auto_20220531_1949.py @@ -0,0 +1,24 @@ +# Generated by Django 3.2.6 on 2022-05-31 19:49 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0007_auto_20220531_1706'), + ] + + operations = [ + migrations.AlterField( + model_name='cluster', + name='blob_storage_provisioner', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='postgres_db_provisioner', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0009_cluster_internal_db_cluster_ip_range.py b/src/core/api/app/clusters/migrations/0009_cluster_internal_db_cluster_ip_range.py new file mode 100644 index 00000000..cb13ab7c --- /dev/null +++ b/src/core/api/app/clusters/migrations/0009_cluster_internal_db_cluster_ip_range.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-06-01 21:15 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0008_auto_20220531_1949'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='internal_db_cluster_ip_range', + field=models.CharField(blank=True, 
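Migrations 0007 and 0008 consolidate the per-service provisioners into EncryptedJSONField columns. The implementation of core.fields.EncryptedJSONField is not included in this diff; purely as an illustration of the concept, a field of this kind could be sketched as Fernet-encrypted JSON stored in a text column, with settings.FERNET_KEY as an assumed setting.

```python
# Illustrative sketch only -- NOT the actual core.fields.EncryptedJSONField.
# Assumes a hypothetical settings.FERNET_KEY (urlsafe base64, 32-byte key).
import json

from cryptography.fernet import Fernet
from django.conf import settings
from django.db import models


class EncryptedJSONSketch(models.TextField):
    """Store a JSON-serializable value as a Fernet-encrypted text column."""

    def get_prep_value(self, value):
        if value is None:
            return value
        token = Fernet(settings.FERNET_KEY).encrypt(json.dumps(value).encode("utf-8"))
        return token.decode("utf-8")

    def from_db_value(self, value, expression, connection):
        if value is None:
            return value
        plaintext = Fernet(settings.FERNET_KEY).decrypt(value.encode("utf-8"))
        return json.loads(plaintext)
```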
max_length=18, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0010_rename_blob_storage_provisioner_cluster_s3_provisioner.py b/src/core/api/app/clusters/migrations/0010_rename_blob_storage_provisioner_cluster_s3_provisioner.py new file mode 100644 index 00000000..424bb2ea --- /dev/null +++ b/src/core/api/app/clusters/migrations/0010_rename_blob_storage_provisioner_cluster_s3_provisioner.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-06-06 14:23 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0009_cluster_internal_db_cluster_ip_range'), + ] + + operations = [ + migrations.RenameField( + model_name='cluster', + old_name='blob_storage_provisioner', + new_name='s3_provisioner', + ), + ] diff --git a/src/core/api/app/clusters/migrations/0011_remove_cluster_fluentd_store_external.py b/src/core/api/app/clusters/migrations/0011_remove_cluster_fluentd_store_external.py new file mode 100644 index 00000000..855a9cf6 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0011_remove_cluster_fluentd_store_external.py @@ -0,0 +1,17 @@ +# Generated by Django 3.2.6 on 2022-06-07 20:19 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0010_rename_blob_storage_provisioner_cluster_s3_provisioner'), + ] + + operations = [ + migrations.RemoveField( + model_name='cluster', + name='fluentd_store_external', + ), + ] diff --git a/src/core/api/app/clusters/migrations/0012_cluster_dont_use_uwsgi.py b/src/core/api/app/clusters/migrations/0012_cluster_dont_use_uwsgi.py new file mode 100644 index 00000000..f6cc1f59 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0012_cluster_dont_use_uwsgi.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-06-22 15:24 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0011_remove_cluster_fluentd_store_external'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='dont_use_uwsgi', + field=models.BooleanField(default=False), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0013_auto_20220728_1450.py b/src/core/api/app/clusters/migrations/0013_auto_20220728_1450.py new file mode 100644 index 00000000..abc2affa --- /dev/null +++ b/src/core/api/app/clusters/migrations/0013_auto_20220728_1450.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-07-28 14:50 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0012_cluster_dont_use_uwsgi'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='external_ip', + field=models.CharField(blank=True, max_length=15, null=True), + ), + migrations.AddField( + model_name='cluster', + name='internal_ip', + field=models.CharField(blank=True, max_length=15, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0014_cluster_release.py b/src/core/api/app/clusters/migrations/0014_cluster_release.py new file mode 100644 index 00000000..53a54784 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0014_cluster_release.py @@ -0,0 +1,21 @@ +# Generated by Django 3.2.6 on 2022-09-20 20:02 + +import clusters.models.cluster +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0066_connection_connection_user'), + ('clusters', '0013_auto_20220728_1450'), + 
] + + operations = [ + migrations.AddField( + model_name='cluster', + name='release', + field=models.ForeignKey(default=clusters.models.cluster.default_release, on_delete=django.db.models.deletion.PROTECT, related_name='clusters', to='projects.release'), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0015_auto_20220920_2217.py b/src/core/api/app/clusters/migrations/0015_auto_20220920_2217.py new file mode 100644 index 00000000..d2a45d88 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0015_auto_20220920_2217.py @@ -0,0 +1,29 @@ +# Generated by Django 3.2.6 on 2022-09-20 22:17 + +import core.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0014_cluster_release'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='airbyte_logs_external_backend', + field=models.CharField(choices=[('s3', 'S3')], default='s3', help_text='Default logs backend when airbyte_logs_external is True', max_length=20), + ), + migrations.AddField( + model_name='cluster', + name='airflow_logs_external_backend', + field=models.CharField(choices=[('s3', 'S3'), ('efs', 'EFS')], default='s3', help_text='Default logs backend when airflow_logs_external is True', max_length=20), + ), + migrations.AddField( + model_name='cluster', + name='efs_provisioner', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0016_cluster_limits.py b/src/core/api/app/clusters/migrations/0016_cluster_limits.py new file mode 100644 index 00000000..77470b04 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0016_cluster_limits.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-10-19 20:25 + +import clusters.models.cluster +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0015_auto_20220920_2217'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='limits', + field=models.JSONField(blank=True, default=clusters.models.cluster.default_limits, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0017_auto_20221223_1323.py b/src/core/api/app/clusters/migrations/0017_auto_20221223_1323.py new file mode 100644 index 00000000..9af3973b --- /dev/null +++ b/src/core/api/app/clusters/migrations/0017_auto_20221223_1323.py @@ -0,0 +1,30 @@ +# Generated by Django 3.2.6 on 2022-12-23 13:23 + +import clusters.models.cluster +import core.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0016_cluster_limits'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='docker_config', + field=core.fields.EncryptedJSONField(blank=True, default=clusters.models.cluster.default_docker_config, editable=True, null=True), + ), + migrations.AddField( + model_name='cluster', + name='docker_config_secret_name', + field=models.CharField(blank=True, default='docker-config-datacovesprivate', max_length=253, null=True), + ), + migrations.AddField( + model_name='cluster', + name='docker_registry', + field=models.CharField(blank=True, max_length=253), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0017_clusteralert.py b/src/core/api/app/clusters/migrations/0017_clusteralert.py new file mode 100644 index 00000000..69630392 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0017_clusteralert.py @@ -0,0 +1,27 @@ +# 
Generated by Django 3.2.6 on 2022-12-29 20:17 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0070_blockedpodcreationrequest'), + ('clusters', '0016_cluster_limits'), + ] + + operations = [ + migrations.CreateModel( + name='ClusterAlert', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('name', models.CharField(max_length=100)), + ('namespace', models.CharField(blank=True, max_length=63, null=True)), + ('summary', models.TextField()), + ('data', models.JSONField()), + ('environment', models.ForeignKey(null=True, blank=True, on_delete=django.db.models.deletion.CASCADE, to='projects.environment')), + ], + ), + ] diff --git a/src/core/api/app/clusters/migrations/0018_merge_0017_auto_20221223_1323_0017_clusteralert.py b/src/core/api/app/clusters/migrations/0018_merge_0017_auto_20221223_1323_0017_clusteralert.py new file mode 100644 index 00000000..0c1e5e97 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0018_merge_0017_auto_20221223_1323_0017_clusteralert.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.16 on 2022-12-30 16:59 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0017_auto_20221223_1323'), + ('clusters', '0017_clusteralert'), + ] + + operations = [ + ] diff --git a/src/core/api/app/clusters/migrations/0019_auto_20230120_1821.py b/src/core/api/app/clusters/migrations/0019_auto_20230120_1821.py new file mode 100644 index 00000000..b15da51e --- /dev/null +++ b/src/core/api/app/clusters/migrations/0019_auto_20230120_1821.py @@ -0,0 +1,36 @@ +# Generated by Django 3.2.16 on 2023-01-20 18:21 + +import django.db.models.deletion +from django.db import migrations, models + + +def associate_cluster_alerts(apps, schema_editor): + Cluster = apps.get_model("clusters", "Cluster") + ClusterAlert = apps.get_model("clusters", "ClusterAlert") + if ClusterAlert.objects.count() > 0: + cluster = Cluster.objects.current().first() + ClusterAlert.objects.update(cluster_id=cluster.id) + + +class Migration(migrations.Migration): + dependencies = [ + ("clusters", "0018_merge_0017_auto_20221223_1323_0017_clusteralert"), + ] + + operations = [ + migrations.RemoveField( + model_name="clusteralert", + name="summary", + ), + migrations.AddField( + model_name="clusteralert", + name="cluster", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + to="clusters.cluster", + ), + ), + migrations.RunPython(associate_cluster_alerts), + ] diff --git a/src/core/api/app/clusters/migrations/0020_clusteralert_resolved.py b/src/core/api/app/clusters/migrations/0020_clusteralert_resolved.py new file mode 100644 index 00000000..2ba86539 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0020_clusteralert_resolved.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-02-08 08:34 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0019_auto_20230120_1821'), + ] + + operations = [ + migrations.AddField( + model_name='clusteralert', + name='resolved', + field=models.BooleanField(default=False), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0021_auto_20230210_2255.py b/src/core/api/app/clusters/migrations/0021_auto_20230210_2255.py new file mode 100644 index 00000000..893f25c5 --- 
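Migration 0019's associate_cluster_alerts step passes no reverse callable, so unapplying it raises IrreversibleError. If reversibility mattered, the data migration could be written with an explicit no-op reverse; a sketch under that assumption (not part of the diff):

```python
# Sketch only: a RunPython data migration with an explicit no-op reverse so it
# can be unapplied. Historical models from apps.get_model() expose only the
# plain default manager, so custom manager helpers are avoided here.
from django.db import migrations


def associate_cluster_alerts(apps, schema_editor):
    Cluster = apps.get_model("clusters", "Cluster")
    ClusterAlert = apps.get_model("clusters", "ClusterAlert")
    if ClusterAlert.objects.exists():
        cluster = Cluster.objects.order_by("id").first()
        ClusterAlert.objects.update(cluster_id=cluster.id)


class Migration(migrations.Migration):
    dependencies = [
        ("clusters", "0018_merge_0017_auto_20221223_1323_0017_clusteralert"),
    ]
    operations = [
        migrations.RunPython(associate_cluster_alerts, migrations.RunPython.noop),
    ]
```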
/dev/null +++ b/src/core/api/app/clusters/migrations/0021_auto_20230210_2255.py @@ -0,0 +1,33 @@ +# Generated by Django 3.2.16 on 2023-02-10 22:55 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0020_clusteralert_resolved'), + ] + + operations = [ + migrations.AddField( + model_name='clusteralert', + name='status', + field=models.CharField(blank=True, choices=[('firing', 'Firing'), ('resolved', 'Resolved')], max_length=10, null=True), + ), + migrations.CreateModel( + name='ClusterAlertGroup', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('data', models.JSONField()), + ('cluster', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='clusters.cluster')), + ], + ), + migrations.AddField( + model_name='clusteralert', + name='group', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='clusters.clusteralertgroup'), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0022_auto_20230211_0057.py b/src/core/api/app/clusters/migrations/0022_auto_20230211_0057.py new file mode 100644 index 00000000..e9f86f5e --- /dev/null +++ b/src/core/api/app/clusters/migrations/0022_auto_20230211_0057.py @@ -0,0 +1,30 @@ +# Generated by Django 3.2.16 on 2023-02-11 00:57 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0073_profile_files_from'), + ('clusters', '0021_auto_20230210_2255'), + ] + + operations = [ + migrations.AddField( + model_name='clusteralertgroup', + name='environment', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='projects.environment'), + ), + migrations.AddField( + model_name='clusteralertgroup', + name='name', + field=models.CharField(blank=True, max_length=100, null=True), + ), + migrations.AddField( + model_name='clusteralertgroup', + name='namespace', + field=models.CharField(blank=True, max_length=63, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0023_auto_20230227_1833.py b/src/core/api/app/clusters/migrations/0023_auto_20230227_1833.py new file mode 100644 index 00000000..e39f86c7 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0023_auto_20230227_1833.py @@ -0,0 +1,24 @@ +# Generated by Django 3.2.16 on 2023-02-27 18:33 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("clusters", "0022_auto_20230211_0057"), + ] + + operations = [ + migrations.RemoveField( + model_name="clusteralert", + name="group", + ), + migrations.AddField( + model_name="clusteralert", + name="started_at", + field=models.DateTimeField(), + ), + migrations.DeleteModel( + name="ClusterAlertGroup", + ), + ] diff --git a/src/core/api/app/clusters/migrations/0024_cluster_grafana_settings.py b/src/core/api/app/clusters/migrations/0024_cluster_grafana_settings.py new file mode 100644 index 00000000..ad4a1ece --- /dev/null +++ b/src/core/api/app/clusters/migrations/0024_cluster_grafana_settings.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.16 on 2023-03-06 20:13 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0023_auto_20230227_1833'), + ] + + operations = [ + 
migrations.AddField( + model_name='cluster', + name='grafana_settings', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0025_auto_20230314_1211.py b/src/core/api/app/clusters/migrations/0025_auto_20230314_1211.py new file mode 100644 index 00000000..b2a85e3b --- /dev/null +++ b/src/core/api/app/clusters/migrations/0025_auto_20230314_1211.py @@ -0,0 +1,44 @@ +# Generated by Django 3.2.16 on 2023-03-14 12:11 + +import clusters.models.cluster +import core.fields +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("clusters", "0024_cluster_grafana_settings"), + ] + + operations = [ + migrations.AddField( + model_name="cluster", + name="alert_system_settings", + field=models.JSONField( + blank=True, + default=clusters.models.cluster.default_alert_system_settings, + help_text='Alert system settings, such as muted notifs ({"muted_notifications": [{"namespace": "cloudwatch", "name": "ContainerCpuUsage", "channel": "slack"}]}).', + null=True, + ), + ), + migrations.AlterField( + model_name="cluster", + name="grafana_settings", + field=core.fields.EncryptedJSONField( + blank=True, + default=dict, + editable=True, + help_text="Grafana settings, such as OIDC secrets", + null=True, + ), + ), + migrations.AlterField( + model_name="clusteralert", + name="cluster", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, to="clusters.cluster" + ), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0026_alter_cluster_alert_system_settings.py b/src/core/api/app/clusters/migrations/0026_alter_cluster_alert_system_settings.py new file mode 100644 index 00000000..0d48e617 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0026_alter_cluster_alert_system_settings.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.16 on 2023-06-14 19:25 + +import clusters.models.cluster +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0025_auto_20230314_1211'), + ] + + operations = [ + migrations.AlterField( + model_name='cluster', + name='alert_system_settings', + field=models.JSONField(blank=True, default=clusters.models.cluster.default_alert_system_settings, help_text='Alert system settings, such as muted notifs ({"muted_notifications": [{"namespace": "cloudwatch", "pod": "~worker-.*", "name": "=ContainerCpuUsage", "channel": "slack"}]}).', null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0027_cluster_api_server_ips.py b/src/core/api/app/clusters/migrations/0027_cluster_api_server_ips.py new file mode 100644 index 00000000..6bc6abc7 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0027_cluster_api_server_ips.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-09-27 23:08 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0026_alter_cluster_alert_system_settings'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='api_server_ips', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0028_auto_20231023_1751.py b/src/core/api/app/clusters/migrations/0028_auto_20231023_1751.py new file mode 100644 index 00000000..a39f3dac --- /dev/null +++ b/src/core/api/app/clusters/migrations/0028_auto_20231023_1751.py @@ -0,0 +1,30 @@ +# Generated by 
Django 3.2.16 on 2023-10-23 17:51 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0027_cluster_api_server_ips'), + ] + + operations = [ + migrations.RemoveField( + model_name='cluster', + name='airbyte_db_external', + ), + migrations.RemoveField( + model_name='cluster', + name='airbyte_logs_external', + ), + migrations.RemoveField( + model_name='cluster', + name='airbyte_logs_external_backend', + ), + migrations.AddField( + model_name='cluster', + name='airbyte_config', + field=models.JSONField(default=dict), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0029_auto_20231101_2309.py b/src/core/api/app/clusters/migrations/0029_auto_20231101_2309.py new file mode 100644 index 00000000..63556abd --- /dev/null +++ b/src/core/api/app/clusters/migrations/0029_auto_20231101_2309.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.20 on 2023-11-01 23:09 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0028_auto_20231023_1751'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='code_server_config', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='airbyte_config', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0030_auto_20231207_1901.py b/src/core/api/app/clusters/migrations/0030_auto_20231207_1901.py new file mode 100644 index 00000000..d845aa16 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0030_auto_20231207_1901.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.20 on 2023-12-07 19:01 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0029_auto_20231101_2309'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='airflow_config', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AddField( + model_name='cluster', + name='superset_config', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0031_clusterupgrade.py b/src/core/api/app/clusters/migrations/0031_clusterupgrade.py new file mode 100644 index 00000000..a3c24b06 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0031_clusterupgrade.py @@ -0,0 +1,25 @@ +# Generated by Django 3.2.20 on 2023-12-14 14:25 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0030_auto_20231207_1901'), + ] + + operations = [ + migrations.CreateModel( + name='ClusterUpgrade', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('release_name', models.CharField(max_length=200)), + ('started_at', models.DateTimeField(auto_now_add=True)), + ('finished_at', models.DateTimeField(blank=True, editable=False, null=True)), + ('triggered_by', models.CharField(blank=True, max_length=200, null=True)), + ('cluster', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='upgrades', to='clusters.cluster')), + ], + ), + ] diff --git a/src/core/api/app/clusters/migrations/0032_cluster_celery_heartbeat_at.py b/src/core/api/app/clusters/migrations/0032_cluster_celery_heartbeat_at.py new file mode 100644 index 00000000..29641815 --- /dev/null +++ 
b/src/core/api/app/clusters/migrations/0032_cluster_celery_heartbeat_at.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2024-01-26 14:51 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0031_clusterupgrade'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='celery_heartbeat_at', + field=models.DateTimeField(blank=True, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0033_cluster_settings.py b/src/core/api/app/clusters/migrations/0033_cluster_settings.py new file mode 100644 index 00000000..179cb5ee --- /dev/null +++ b/src/core/api/app/clusters/migrations/0033_cluster_settings.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.20 on 2024-02-23 15:55 + +import clusters.models.cluster +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0032_cluster_celery_heartbeat_at'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='settings', + field=models.JSONField(blank=True, default=clusters.models.cluster.default_cluster_settings, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0034_alter_cluster_airflow_logs_external_backend.py b/src/core/api/app/clusters/migrations/0034_alter_cluster_airflow_logs_external_backend.py new file mode 100644 index 00000000..8129adf9 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0034_alter_cluster_airflow_logs_external_backend.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2024-03-14 20:13 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0033_cluster_settings'), + ] + + operations = [ + migrations.AlterField( + model_name='cluster', + name='airflow_logs_external_backend', + field=models.CharField(choices=[('s3', 'S3'), ('efs', 'EFS'), ('afs', 'AFS'), ('nfs', 'NFS')], default='s3', help_text='Default logs backend when airflow_logs_external is True', max_length=20), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0035_alter_cluster_settings.py b/src/core/api/app/clusters/migrations/0035_alter_cluster_settings.py new file mode 100644 index 00000000..f9e6d15f --- /dev/null +++ b/src/core/api/app/clusters/migrations/0035_alter_cluster_settings.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.20 on 2024-04-02 01:01 + +import clusters.models.cluster +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0034_alter_cluster_airflow_logs_external_backend'), + ] + + operations = [ + migrations.AlterField( + model_name='cluster', + name='settings', + field=models.JSONField(blank=True, default=clusters.models.cluster.default_cluster_settings, help_text="Configures 'admin_panel_color' (CSS color code) and 'code_server_inactivity_threshold' (minutes)", null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0036_cluster_datahub_config.py b/src/core/api/app/clusters/migrations/0036_cluster_datahub_config.py new file mode 100644 index 00000000..398bc353 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0036_cluster_datahub_config.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2024-06-12 02:11 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0035_alter_cluster_settings'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='datahub_config', + 
field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0036_cluster_service_account.py b/src/core/api/app/clusters/migrations/0036_cluster_service_account.py new file mode 100644 index 00000000..bc331d33 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0036_cluster_service_account.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.20 on 2024-06-05 22:55 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0035_alter_cluster_settings'), + ] + + operations = [ + migrations.AddField( + model_name='cluster', + name='service_account', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + ] diff --git a/src/core/api/app/clusters/migrations/0037_auto_20240610_1936.py b/src/core/api/app/clusters/migrations/0037_auto_20240610_1936.py new file mode 100644 index 00000000..c75aef06 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0037_auto_20240610_1936.py @@ -0,0 +1,29 @@ +# Generated by Django 3.2.20 on 2024-06-10 19:36 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0036_cluster_service_account'), + ] + + operations = [ + migrations.RemoveField( + model_name='cluster', + name='airflow_db_external', + ), + migrations.RemoveField( + model_name='cluster', + name='airflow_logs_external', + ), + migrations.RemoveField( + model_name='cluster', + name='airflow_logs_external_backend', + ), + migrations.RemoveField( + model_name='cluster', + name='superset_db_external', + ), + ] diff --git a/src/core/api/app/clusters/migrations/0038_merge_20240617_1505.py b/src/core/api/app/clusters/migrations/0038_merge_20240617_1505.py new file mode 100644 index 00000000..4918c258 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0038_merge_20240617_1505.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.20 on 2024-06-17 15:05 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0036_cluster_datahub_config'), + ('clusters', '0037_auto_20240610_1936'), + ] + + operations = [ + ] diff --git a/src/core/api/app/clusters/migrations/0039_auto_20240701_2118.py b/src/core/api/app/clusters/migrations/0039_auto_20240701_2118.py new file mode 100644 index 00000000..5eaf2be3 --- /dev/null +++ b/src/core/api/app/clusters/migrations/0039_auto_20240701_2118.py @@ -0,0 +1,172 @@ +# Generated by Django 3.2.20 on 2024-07-01 21:18 + +import clusters.models.cluster +import core.fields +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0105_auto_20240701_2118'), + ('clusters', '0038_merge_20240617_1505'), + ] + + operations = [ + migrations.AlterField( + model_name='cluster', + name='airbyte_config', + field=models.JSONField(blank=True, default=dict, help_text="Default AirByte configuration. This can be overridden per-environment. It is a dictionary, typically with 'db' and 'logs' keys mapping to dictionaries with configuration for both.", null=True), + ), + migrations.AlterField( + model_name='cluster', + name='airflow_config', + field=models.JSONField(blank=True, default=dict, help_text='Default AirFlow configuration. This can be overridden per-environment. 
It is a dictionary, usually empty at this level.', null=True), + ), + migrations.AlterField( + model_name='cluster', + name='api_server_ips', + field=models.JSONField(blank=True, default=dict, help_text="A JSON dictionary with two keys in it; 'ips' and 'ports'. 'ips' is a list of internal DNS IPs as strings, and 'ports' is the corresponding list of port numbers as integers.", null=True), + ), + migrations.AlterField( + model_name='cluster', + name='celery_heartbeat_at', + field=models.DateTimeField(blank=True, help_text='Last time Celery reported in; not normally edited by users', null=True), + ), + migrations.AlterField( + model_name='cluster', + name='cert_manager_issuer', + field=models.CharField(blank=True, help_text='Sets the cert-manager.io/cluster-issuer annotation on the cluster ingress - https://cert-manager.io/docs/configuration/issuers/', max_length=253, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='code_server_config', + field=models.JSONField(blank=True, default=dict, help_text="Default Code Server configuration. This can be overridden per-environment. It is a dictionary which typically has a 'resources' dictionary of Kubernetes resource allocations, an 'overprovisioning' dictionary which has settings for hot spares, and finally a key 'max_code_server_pods_per_node'. This is not an exhaustive list.", null=True), + ), + migrations.AlterField( + model_name='cluster', + name='datahub_config', + field=models.JSONField(blank=True, default=dict, help_text='Default DataHub configuration. This can be overriden per-environment. It is a dictionary, usually empty at this level.', null=True), + ), + migrations.AlterField( + model_name='cluster', + name='docker_config', + field=core.fields.EncryptedJSONField(blank=True, default=clusters.models.cluster.default_docker_config, editable=True, help_text="If blank, then core-api is not responsible for creating the Docker config secret; another system creates the secret, which should be named docker_config_secret_name ... otherwise, this is a dictionary with an 'auths' key which, in turn, is a dictionary mapping registry host names to dictionaries of credential information: username, password, email, and auth", null=True), + ), + migrations.AlterField( + model_name='cluster', + name='docker_config_secret_name', + field=models.CharField(blank=True, default='docker-config-datacovesprivate', help_text='The Kubernetes secret to use with the Docker registry', max_length=253, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='docker_registry', + field=models.CharField(blank=True, help_text='Registry to pull images from. 
Can be blank for dockerhub.', max_length=253), + ), + migrations.AlterField( + model_name='cluster', + name='domain', + field=models.CharField(help_text='Base domain name for the cluster, without a leading .', max_length=253, unique=True), + ), + migrations.AlterField( + model_name='cluster', + name='dont_use_uwsgi', + field=models.BooleanField(default=False, help_text='Set true for development environments.'), + ), + migrations.AlterField( + model_name='cluster', + name='efs_provisioner', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text="Dynamic EFS provisioning is not yet supported; this can be a dictionary with a 'global' field to fake autoprovisioning; the global field should contain the fake-provisioned EFS information.", null=True), + ), + migrations.AlterField( + model_name='cluster', + name='external_dns_url', + field=models.CharField(blank=True, help_text='Sets the external-dns.alpha.kubernetes.io/target annotation on the cluster ingress. This requires provider support, and allows the creation of automatic DNS records.', max_length=253, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='external_ip', + field=models.CharField(blank=True, help_text="The cluster's external IP address. May be, and often is, blank.", max_length=15, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='extra_images', + field=models.JSONField(blank=True, default=list, help_text='Currently unused. This comes from Releases instead.', null=True), + ), + migrations.AlterField( + model_name='cluster', + name='features_enabled', + field=models.JSONField(blank=True, default=clusters.models.cluster.default_features_enabled, help_text='Dictionary of feature flags. There are too many to document here; see the default_features_enabled method in api/app/clusters/models/cluster.py', null=True), + ), + migrations.AlterField( + model_name='cluster', + name='grafana_settings', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Grafana settings, such as OIDC secrets', null=True), + ), + migrations.AlterField( + model_name='cluster', + name='internal_db_cluster_ip_range', + field=models.CharField(blank=True, help_text='This is a CIDR-style IP address with netmask (i.e. 192.168.1.0/24). It is for using a block of IP addresses for Egress, similar to internal_dns_ip; it probably does not make sense to use this and the other two internal address fields above.', max_length=18, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='internal_dns_ip', + field=models.CharField(blank=True, help_text='Used to configure an Egress Rule to use a specific IP address. This is used on private networks mostly in order toaccess the IP address of a DNS server. You can set the Internal DNS URL instead if you want this to be dynamic.', max_length=15, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='internal_dns_url', + field=models.CharField(blank=True, help_text='This is a domain name which is resolved to get the internal_dns_ip address. It does not make sense to set both this and internal_dns_ip. See that field for more details.', max_length=253, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='internal_ip', + field=models.CharField(blank=True, help_text="The cluster's internal IP address. If this field is blank, external_ip should be blank as well, and both IP addresses will be fetched via Kubernetes' get_ingress_controller_ips call. 
Leaving this blank, but filling in external_ip, will probably cause the operator to fail.", max_length=15, null=True), + ), + migrations.AlterField( + model_name='cluster', + name='kubernetes_version', + field=models.CharField(help_text='Kubernetes version used by cluster', max_length=40), + ), + migrations.AlterField( + model_name='cluster', + name='limits', + field=models.JSONField(blank=True, default=clusters.models.cluster.default_limits, help_text='JSON Dictionary specifying the default cluster usage limits. It can have the following keys which map to integer limits: max_cluster_active_accounts, max_cluster_active_environments, max_cluster_active_trial_accounts, max_cluster_active_users ... not all of these are implemented yet.', null=True), + ), + migrations.AlterField( + model_name='cluster', + name='postgres_db_provisioner', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary with PostgreSQL server information. It can have the following keys: host, pass, user, port (int), db ... If provided, we will automatically create PostgreSQL databases on the provided server (user/pass should belong to an admin user which can CREATE DATABASE). Leave empty to use a database in the cluster.', null=True), + ), + migrations.AlterField( + model_name='cluster', + name='provider', + field=models.CharField(choices=[('eks', 'EKS (Amazon)'), ('gke', 'GKE (Google)'), ('aks', 'AKS (Azure)'), ('kind', 'Kind (local)')], default='kind', help_text='Service Provider for Cluster', max_length=20), + ), + migrations.AlterField( + model_name='cluster', + name='release', + field=models.ForeignKey(default=clusters.models.cluster.default_release, help_text='Which release is being used for core services.', on_delete=django.db.models.deletion.PROTECT, related_name='clusters', to='projects.release'), + ), + migrations.AlterField( + model_name='cluster', + name='release_channel', + field=models.CharField(default='edge', help_text='Release channel to follow - Not used yet', max_length=20), + ), + migrations.AlterField( + model_name='cluster', + name='s3_provisioner', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text="Configuration to dynamically create S3 buckets. This should be a dictionary with 'aws_access_key_id', 'aws_secret_access_key' and 'region'. This enables us to automatically make S3 buckets as necessary. If not set, features that need S3 buckets (such as airflow logs to S3) will need manual configuration.", null=True), + ), + migrations.AlterField( + model_name='cluster', + name='superset_config', + field=models.JSONField(blank=True, default=dict, help_text='Default Superset configuration. This can be overridden per-environment. 
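The help_texts introduced in migration 0039 describe the expected shape of several of these JSON and encrypted fields. Purely for illustration, values matching those descriptions would look like the following; every concrete value here is hypothetical and not taken from the diff.

```python
# Illustrative values matching the shapes described by the help_texts above.
api_server_ips = {
    "ips": ["10.0.12.34", "10.0.12.35"],    # internal DNS IPs as strings
    "ports": [443, 443],                     # corresponding ports as integers
}

postgres_db_provisioner = {
    "host": "db.internal.example.com",
    "port": 5432,
    "user": "provisioner_admin",             # must be able to CREATE DATABASE
    "pass": "change-me",
    "db": "postgres",
}

s3_provisioner = {
    "aws_access_key_id": "AKIA...",          # placeholder
    "aws_secret_access_key": "...",          # placeholder
    "region": "us-east-1",
}
```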
It is a dictionary, usually empty at this level.', null=True), + ), + migrations.AlterField( + model_name='clusteralert', + name='environment', + field=models.ForeignKey(blank=True, help_text='If this is null, then it is a system alert.', null=True, on_delete=django.db.models.deletion.CASCADE, to='projects.environment'), + ), + ] diff --git a/src/core/api/app/clusters/migrations/__init__.py b/src/core/api/app/clusters/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/clusters/models/__init__.py b/src/core/api/app/clusters/models/__init__.py new file mode 100644 index 00000000..a58a30bf --- /dev/null +++ b/src/core/api/app/clusters/models/__init__.py @@ -0,0 +1,2 @@ +from .cluster import * # noqa: F401,F403 +from .cluster_upgrade import ClusterUpgrade # noqa: F401,F403 diff --git a/src/core/api/app/clusters/models/cluster.py b/src/core/api/app/clusters/models/cluster.py new file mode 100644 index 00000000..85e2e48b --- /dev/null +++ b/src/core/api/app/clusters/models/cluster.py @@ -0,0 +1,780 @@ +import re +from functools import cached_property + +import sentry_sdk +from core.fields import EncryptedJSONField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from dateutil import parser +from django.conf import settings +from django.contrib import admin +from django.db import models +from django.urls import reverse +from django.utils import timezone +from notifications.models import AccountNotification + +import lib.kubernetes.client as k8s_client + + +def default_features_enabled(): + return { + "user_profile_delete_account": False, + "user_profile_change_name": False, + "user_profile_change_credentials": False, + "user_profile_change_ssh_keys": False, + "user_profile_change_ssl_keys": False, + "accounts_signup": False, + "admin_account": False, + "admin_groups": False, + "admin_create_groups": False, + "admin_invitations": False, + "admin_users": False, + "admin_projects": False, + "admin_environments": False, + "admin_connections": False, + "admin_service_credentials": False, + "admin_billing": False, + "admin_integrations": False, + "admin_secrets": False, + "admin_profiles": False, + "admin_code_server_environment_variables": False, + "admin_env_code_server_mem_and_cpu_resources": False, + "admin_env_airflow_mem_and_cpu_resources": False, + "stop_codeserver_on_inactivity": False, + "shareable_codeserver": False, + "codeserver_exposures": False, + "codeserver_restart": False, + "block_workers": False, + "observability_stack": False, + "select_minio_logs": False, + "show_get_started_banner": True, + "local_airflow": False, + "env_grafana_dashboards_enabled": False, + "node_local_dns_enabled": False, + } + + +def default_docker_config(): + return settings.DEFAULT_DOCKER_CONFIG + + +def default_limits(): + return { + "max_cluster_active_accounts": 20, + # "max_cluster_active_environments": 50, + "max_cluster_active_trial_accounts": 10, + # "max_cluster_active_users": 100, + } + + +def default_release(): + from projects.models import Release + + latests = Release.objects.get_latests().values_list("id", flat=True) + return latests[0] if latests else None + + +def default_alert_system_settings(): + return {"muted_notifications": []} + + +def default_cluster_settings(): + return {"admin_panel_color": "green", "code_server_inactivity_threshold": 30} + + +__all__ = ["Cluster", "ClusterAlert"] + + +class ClusterManager(models.Manager): + def current(self): + qs = self.get_queryset() + return 
qs.filter(domain=settings.BASE_DOMAIN) + + +class Cluster(AuditModelMixin, DatacovesModel): + """A cluster is a set of environments that are all running on a single + infrastructure provider (EKS, GKE, AKS, or local/Kind). + + The cluster associates the base domain name with the environments, + controls features, certain global settings, and keeps track of which + release is being used by the cluster as a whole. + + **Constants** + + -------------- + Provider Types + -------------- + + - EKS_PROVIDER + - GKE_PROVIDER + - AKS_PROVIDER + - KIND_PROVIDER + + ------------------ + Provider Selection + ------------------ + + - PROVIDERS + + Tuple of tuples, with each element tuple having the first element + being the provider type constant, and the second element being the + human readable text. + + ------------------ + Logs Backend Types + ------------------ + + - LOGS_BACKEND_S3 + - LOGS_BACKEND_EFS + - LOGS_BACKEND_AFS + - LOGS_BACKEND_NFS + + -------------------------- + Airbyte Logs Backend Types + -------------------------- + + - AIRBYTE_LOGS_BACKEND + + Tuple of tuples, with each element tuple having the first element + be the log backend type and the second element being the human readable + text. + + -------------------------- + Airflow Logs Backend Types + -------------------------- + + - AIRFLOW_LOGS_BACKEND + + Tuple of tuples, with each element tuple having the first element + be the log backend type and the second element being the human readable + text. + + ======= + Methods + ======= + + - **save** is overridden to provide certain default capabilities + - **has_dynamic_db_provisioning()** - Does this cluster use dynamic + DB provisioning? + - **has_dynamic_network_filesystem_provisioning()** - + Does this cluster use dynamic EFS provisioning? + - **has_dynamic_blob_storage_provisioning()** - + Does this cluster use dynamic S3 provisioning? + - **is_feature_enabled(feature_code)** - Is the given feature enabled? + - **has_dynamic_provisioning()** - Are both dynamic DB and dynamic S3 + provisioning turned on? + - **get_image(repo, release)** - Returns the image name and tag for a + given repo. If the cluster uses a different docker registry, it is + prepended to the image name. + - **get_service_image(service, repo, tag_prefix, release)** - + Returns the image name and tag for a given repo and service. + If the cluster uses a different docker registry, it is + prepended to the image name. 
+ """ + + EKS_PROVIDER = "eks" + GKE_PROVIDER = "gke" + AKS_PROVIDER = "aks" + KIND_PROVIDER = "kind" + + PROVIDERS = ( + ( + EKS_PROVIDER, + "EKS (Amazon)", + ), + ( + GKE_PROVIDER, + "GKE (Google)", + ), + ( + AKS_PROVIDER, + "AKS (Azure)", + ), + ( + KIND_PROVIDER, + "Kind (local)", + ), + ) + + LOGS_BACKEND_S3 = "s3" + LOGS_BACKEND_EFS = "efs" + LOGS_BACKEND_AFS = "afs" + LOGS_BACKEND_NFS = "nfs" + AIRBYTE_LOGS_BACKEND = ( + ( + LOGS_BACKEND_S3, + "S3", + ), + ) + AIRFLOW_LOGS_BACKEND = ( + ( + LOGS_BACKEND_S3, + "S3", + ), + ( + LOGS_BACKEND_EFS, + "EFS", + ), + ( + LOGS_BACKEND_AFS, + "AFS", + ), + ( + LOGS_BACKEND_NFS, + "NFS", + ), + ) + + domain = models.CharField( + max_length=253, + unique=True, + help_text="Base domain name for the cluster, without a leading .", + ) + provider = models.CharField( + max_length=20, + choices=PROVIDERS, + default=KIND_PROVIDER, + help_text="Service Provider for Cluster", + ) + kubernetes_version = models.CharField( + max_length=40, help_text="Kubernetes version used by cluster" + ) + cert_manager_issuer = models.CharField( + max_length=253, + null=True, + blank=True, + help_text="Sets the cert-manager.io/cluster-issuer annotation on " + "the cluster ingress - " + "https://cert-manager.io/docs/configuration/issuers/", + ) + external_dns_url = models.CharField( + max_length=253, + null=True, + blank=True, + help_text="Sets the external-dns.alpha.kubernetes.io/target " + "annotation on the cluster ingress. This requires provider " + "support, and allows the creation of automatic DNS records.", + ) + # This is necessary when on private networks to allow traffic + internal_dns_url = models.CharField( + max_length=253, + null=True, + blank=True, + help_text="This is a domain name which is resolved to get the " + "internal_dns_ip address. It does not make sense to set both " + "this and internal_dns_ip. See that field for more details.", + ) + internal_dns_ip = models.CharField( + max_length=15, + null=True, + blank=True, + help_text="Used to configure an Egress Rule to use a specific " + "IP address. This is used on private networks mostly in order to" + "access the IP address of a DNS server. You can " + "set the Internal DNS URL instead if you want this to be dynamic.", + ) + internal_db_cluster_ip_range = models.CharField( + max_length=18, + null=True, + blank=True, + help_text="This is a CIDR-style IP address with netmask (i.e. " + "192.168.1.0/24). It is for using a block of IP addresses for " + "Egress, similar to internal_dns_ip; it probably does not make " + "sense to use this and the other two internal address fields above.", + ) + internal_ip = models.CharField( + max_length=15, + null=True, + blank=True, + help_text="The cluster's internal IP address. If this field is " + "blank, external_ip should be blank as well, and both IP addresses " + "will be fetched via Kubernetes' get_ingress_controller_ips call. " + "Leaving this blank, but filling in external_ip, will probably cause " + "the operator to fail.", + ) + external_ip = models.CharField( + max_length=15, + null=True, + blank=True, + help_text="The cluster's external IP address. May be, and often is, " "blank.", + ) + api_server_ips = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="A JSON dictionary with two keys in it; 'ips' and 'ports'. 
" + "'ips' is a list of internal DNS IPs as strings, and 'ports' is the " + "corresponding list of port numbers as integers.", + ) + extra_images = models.JSONField( + default=list, + blank=True, + null=True, + help_text="Currently unused. This comes from Releases instead.", + ) + dont_use_uwsgi = models.BooleanField( + default=False, help_text="Set true for development environments." + ) + features_enabled = models.JSONField( + default=default_features_enabled, + blank=True, + null=True, + help_text="Dictionary of feature flags. There are too many to " + "document here; see the default_features_enabled method in " + "api/app/clusters/models/cluster.py", + ) + limits = models.JSONField( + default=default_limits, + blank=True, + null=True, + help_text="JSON Dictionary specifying the default cluster usage " + "limits. It can have the following keys which map to integer " + "limits: max_cluster_active_accounts, " + "max_cluster_active_environments, max_cluster_active_trial_accounts, " + "max_cluster_active_users ... not all of these are implemented yet.", + ) + + release_channel = models.CharField( + max_length=20, + default="edge", + help_text="Release channel to follow - Not used yet", + ) + + # Release of core services + release = models.ForeignKey( + "projects.Release", + on_delete=models.PROTECT, + related_name="clusters", + default=default_release, + help_text="Which release is being used for core services.", + ) + + # Docker + docker_registry = models.CharField( + max_length=253, + blank=True, + help_text="Registry to pull images from. Can be blank for dockerhub.", + ) + docker_config_secret_name = models.CharField( + max_length=253, + default="docker-config-datacovesprivate", + null=True, + blank=True, + help_text="The Kubernetes secret to use with the Docker registry", + ) + # An empty docker_config means core-api is not responsible for creating the + # secret, another system creates the secret named docker_config_secret_name. + docker_config = EncryptedJSONField( + default=default_docker_config, + blank=True, + null=True, + help_text="If blank, then core-api is not responsible for creating " + "the Docker config secret; another system creates the secret, " + "which should be named docker_config_secret_name ... otherwise, " + "this is a dictionary with an 'auths' key which, in turn, is a " + "dictionary mapping registry host names to dictionaries of " + "credential information: username, password, email, and auth", + ) + + grafana_settings = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of Grafana settings, such as OIDC secrets", + ) + alert_system_settings = models.JSONField( + default=default_alert_system_settings, + blank=True, + null=True, + help_text='Alert system settings, such as muted notifs ({"muted_notifications":' + ' [{"namespace": "cloudwatch", "pod": "~worker-.*", "name": "=ContainerCpuUsage",' + ' "channel": "slack"}]}).', + ) + # Config to dynamically create postgres dbs + postgres_db_provisioner = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary with PostgreSQL server information. It " + "can have the following keys: host, pass, user, port (int), db ... " + "If provided, we will automatically create PostgreSQL databases on " + "the provided server (user/pass should belong to an admin user " + "which can CREATE DATABASE). 
Leave empty to use a database in the " + "cluster.", + ) + + # Config to dynamically create S3 buckets + s3_provisioner = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="Configuration to dynamically create S3 buckets. This " + "should be a dictionary with 'aws_access_key_id', " + "'aws_secret_access_key' and 'region'. This enables us to " + "automatically make S3 buckets as necessary. If not set, " + "features that need S3 buckets (such as airflow logs to S3) will " + "need manual configuration.", + ) + + # Config to dynamically create EFS filesystems + efs_provisioner = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="Dynamic EFS provisioning is not yet supported; this can " + "be a dictionary with a 'global' field to fake autoprovisioning; " + "the global field should contain the fake-provisioned EFS information.", + ) + + airbyte_config = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Default AirByte configuration. This can be overridden " + "per-environment. It is a dictionary, typically with 'db' and " + "'logs' keys mapping to dictionaries with configuration for both.", + ) + + airflow_config = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Default AirFlow configuration. This can be overridden " + "per-environment. It is a dictionary, usually empty at this level.", + ) + + superset_config = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Default Superset configuration. This can be overridden " + "per-environment. It is a dictionary, usually empty at this level.", + ) + + code_server_config = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Default Code Server configuration. This can be overridden " + "per-environment. It is a dictionary which typically has a " + "'resources' dictionary of Kubernetes resource allocations, " + "an 'overprovisioning' dictionary which has settings for hot spares, " + "and finally a key 'max_code_server_pods_per_node'. This is not an " + "exhaustive list.", + ) + + datahub_config = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Default DataHub configuration. This can be overriden " + "per-environment. It is a dictionary, usually empty at this level.", + ) + + celery_heartbeat_at = models.DateTimeField( + null=True, + blank=True, + help_text="Last time Celery reported in; not normally edited by users", + ) + + # Cluster general settings + settings = models.JSONField( + default=default_cluster_settings, + null=True, + blank=True, + help_text="Configures 'admin_panel_color' (CSS color code) and " + "'code_server_inactivity_threshold' (minutes)", + ) + + # Config to store service accounts + service_account = EncryptedJSONField(default=dict, blank=True, null=True) + + objects = ClusterManager() + + def __str__(self): + return self.domain + + def save(self, *args, **kwargs): + """This overrides save in order to handle defaulting of internal_ip + and external_ip, along with api_server_ips. 
It also copies + all_features (default + overridden features) over to features_enabled + """ + + if not self.internal_ip or not self.external_ip: + kc = self.kubectl + self.internal_ip, self.external_ip = kc.get_ingress_controller_ips() + + if not self.api_server_ips: + self.api_server_ips = self.kubectl.get_cluster_apiserver_ips() + + self.features_enabled = self.all_features + + return super().save(*args, **kwargs) + + @property + def is_local(self) -> bool: + """Is this a local installation (i.e. uses Kind as a provider)""" + return self.provider == self.KIND_PROVIDER + + @property + def upgrade_in_progress(self) -> bool: + """Are upgrades currently running on the system?""" + upgrade = self.upgrades.order_by("-id").first() + return upgrade and upgrade.status == upgrade.STATUS_RUNNING + + @property + def defines_resource_requests(self) -> bool: + """Does this cluster have resource requests?""" + return True # not self.is_local + + @property + def all_features(self) -> dict: + """Default features + overridden features""" + features = default_features_enabled() + features.update(self.features_enabled) + + features["accounts_signup"] = ( + features["accounts_signup"] + and settings.BILLING_ENABLED + and (self.is_local or self.has_dynamic_provisioning()) + ) + return features + + @property + def all_limits(self) -> dict: + """Default limits + overridden limits""" + limits = default_limits() + limits.update(self.limits) + return limits + + def has_dynamic_db_provisioning(self) -> bool: + """Does this cluster use dynamic DB provisioning?""" + return bool(self.postgres_db_provisioner) + + def has_dynamic_blob_storage_provisioning(self) -> bool: + """Does this cluster use dynamic S3 provisioning?""" + return bool(self.s3_provisioner) + + def has_dynamic_network_filesystem_provisioning(self) -> bool: + """Does this cluster use dynamic EFS provisioning?""" + return bool(self.efs_provisioner) + + def is_feature_enabled(self, code: str) -> bool: + """Is the given feature enabled?""" + return self.all_features.get(code, False) + + def has_dynamic_provisioning(self) -> bool: + """Are both dynamic DB and dynamic S3 provisioning turned on?""" + return ( + self.has_dynamic_db_provisioning() + and self.has_dynamic_blob_storage_provisioning() + ) + + def get_image(self, repo: str, release=None): + """ + Returns the image name and tag for a given repo. + If the cluster uses a different docker registry, it is prepended to the image name. + """ + rel = release or self.release + image, tag = rel.get_image(repo) + if self.docker_registry: + image = f"{self.docker_registry}/{image}" + return image, tag + + def get_service_image(self, service: str, repo: str, tag_prefix=None, release=None): + """ + Returns the image name and tag for a given repo and service. + If the cluster uses a different docker registry, it is prepended to the image name. 
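The all_features and all_limits properties above are plain dictionary merges: stored overrides win, and anything unset keeps its default. A minimal, self-contained illustration of that merge, with hypothetical flag values:

```python
# Illustration only (not from the diff): merge semantics used by all_features
# and all_limits -- stored overrides win, unset keys keep their defaults.
defaults = {"admin_users": False, "admin_projects": False, "local_airflow": False}
stored_overrides = {"admin_users": True}   # hypothetical features_enabled value

merged = dict(defaults)
merged.update(stored_overrides)
assert merged == {"admin_users": True, "admin_projects": False, "local_airflow": False}
```

get_image and get_service_image follow a similar override idea for images: the tag always comes from the release, and the image name is prefixed with docker_registry only when one is configured on the cluster.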
+ """ + rel = release or self.release + image, tag = rel.get_service_image(service, repo, tag_prefix=tag_prefix) + if self.docker_registry: + image = f"{self.docker_registry}/{image}" + return image, tag + + @cached_property + def kubectl(self): + """Accessor for the k8s_client Kubectl object""" + return k8s_client.Kubectl() + + +class ClusterAlert(DatacovesModel): + """An alert reported against a cluster by the monitoring stack. The 'data' + payload carries Alertmanager-style fields ('labels', 'annotations', + 'startsAt', 'endsAt'). Alerts linked to an environment generate account + notifications; alerts without an environment are system alerts. + + ========= + Constants + ========= + + - STATUS_FIRING - Alert is active + - STATUS_RESOLVED - Alert is resolved + - STATUS_CHOICES - list of (value, label) pairs for the select box + """ + + STATUS_FIRING = "firing" + STATUS_RESOLVED = "resolved" + STATUS_CHOICES = [ + (STATUS_FIRING, "Firing"), + (STATUS_RESOLVED, "Resolved"), + ] + + created_at = models.DateTimeField(auto_now_add=True, editable=False) + started_at = models.DateTimeField() + name = models.CharField(max_length=100) + namespace = models.CharField(max_length=63, null=True, blank=True) + cluster = models.ForeignKey("clusters.Cluster", on_delete=models.CASCADE) + environment = models.ForeignKey( + "projects.Environment", + on_delete=models.CASCADE, + null=True, + blank=True, + help_text="If this is null, then it is a system alert.", + ) + status = models.CharField( + choices=STATUS_CHOICES, max_length=10, null=True, blank=True + ) + resolved = models.BooleanField(default=False) + data = models.JSONField() + + def __str__(self): + return self.summary + + @admin.display(boolean=True) + def is_system_alert(self): + return self.environment is None + + @property + def summary(self): + """Fetches the 'summary' key from the 'annotations' key of the + data field""" + return self.data.get("annotations", {}).get("summary") + + def generate_metadata(self): + """Generates the metadata dictionary attached to Slack notifications.""" + started_at = parser.parse(self.data.get("startsAt")) + ended_at = parser.parse(self.data.get("endsAt")) + value = self.data.get("annotations", {}).get("value") + if ended_at < started_at: + is_current = True + failing_time = timezone.now() - started_at + else: + is_current = False + failing_time = ended_at - started_at + metadata = { + "alert_type": self.data.get("labels", {}).get("alertname"), + "cluster": self.cluster.domain, + "namespace": self.namespace, + "environment": self.environment.slug if self.environment else None, + "account": ( + self.environment.project.account.slug if self.environment else None + ), + "pod": self.data.get("labels", {}).get("pod"), + "node": self.data.get("labels", {}).get("node"), + "failing_time": f"{failing_time.total_seconds()} seconds", + "value": value, + "started_at": started_at, + "severity": self.data.get("labels", {}).get("severity"), + } + if not is_current: + metadata["ended_at"] = ended_at + return metadata + + def generate_extra_fields(self): + fields = [ + "persistentvolumeclaim", + "phase", + "device", + "fstype", + "mountpoint", + "replicaset", + "release", + "status", + "deployment", + "statefulset", + ] + extra_fields = {} + for field in fields: + field_value = self.data.get("labels", {}).get(field) + if field_value: + extra_fields[field] = field_value + return extra_fields + + def can_send_notification(self, channel: str) -> bool: + def match(value, filter): + if not value or not filter: + return True + operator = "~" if filter[0] == "~" else "=" + if filter[0] in ["=", "~"]: + filter = filter[1:] + return (operator == "=" and filter.lower() == value.lower()) or ( + operator == "~" and re.search(filter, value) + ) + + if self.cluster.upgrade_in_progress: + return False + + if
self.cluster.alert_system_settings: + # Don't send notifications for certain pre-configured conditions + for condition in self.cluster.alert_system_settings.get( + "muted_notifications", [] + ): + # TODO: Extend to support more attributes and operators + namespace_match = match(self.namespace, condition.get("namespace")) + pod_match = match( + self.data.get("labels", {}).get("pod"), + condition.get("pod"), + ) + name_match = match(self.name, condition.get("name")) + channel_match = match(channel, condition.get("channel")) + if namespace_match and pod_match and name_match and channel_match: + return False + return True + + def generate_notifications(self): + metadata = self.generate_metadata() + path = reverse("admin:clusters_clusteralert_change", args=(self.id,)) + link = f"https://api.{self.cluster.domain}{path}" + + if self.can_send_notification("sentry"): + with sentry_sdk.push_scope() as scope: + scope.set_tag( + "cluster_namespace", + f"{metadata['namespace']} [{self.cluster.domain}]", + ) + scope.set_tag("issue_type", "cluster_alert") + if self.environment: + scope.set_tag( + "account", + f"{self.environment.project.account.name} [{self.cluster.domain}]", + ) + scope.set_tag( + "env", + f"{self.environment.name} ({self.environment.slug}) [{self.cluster.domain}]", + ) + scope.set_tag("env_type", self.environment.type) + scope.set_tag( + "project", + f"{self.environment.project.name} ({self.environment.project.slug}) [{self.cluster.domain}]", + ) + scope.set_extra("alert_link", link) + + level = "info" if metadata["severity"] == "warning" else "error" + sentry_sdk.capture_message(self.summary, level) + + if not self.is_system_alert(): + account_notification = AccountNotification( + cluster_alert=self, + environment=self.environment, + account=self.environment.account, + title=f"Account alert: {self.name} on environment: {self.environment.name}", + body=self.summary, + kind=AccountNotification.KIND_CLUSTER, + ) + extra_fields = self.generate_extra_fields() + date_format = "%a, %d %b %Y %H:%M:%S (%Z)" + metadata["started_at"] = metadata["started_at"].strftime(date_format) + if "ended_at" in metadata: + metadata["ended_at"] = metadata["ended_at"].strftime(date_format) + metadata.update(extra_fields) + account_notification.set_slack_data(metadata) + account_notification.set_slack_link("More details", link) + account_notification.save(send_on_save=True) diff --git a/src/core/api/app/clusters/models/cluster_upgrade.py b/src/core/api/app/clusters/models/cluster_upgrade.py new file mode 100644 index 00000000..f32b2dcf --- /dev/null +++ b/src/core/api/app/clusters/models/cluster_upgrade.py @@ -0,0 +1,51 @@ +from datetime import timedelta + +from core.models import DatacovesModel +from django.db import models +from django.utils import timezone + + +class ClusterUpgrade(DatacovesModel): + """Model to keep track of cluster upgrade attempts and status + + ========= + Constants + ========= + + - STATUS_FINISHED - Upgrade completed successfully + - STATUS_FAILED - Upgrade failed, no longer running + - STATUS_RUNNING - Upgrade in progress + """ + + STATUS_FINISHED = "finished" + STATUS_FAILED = "failed" + STATUS_RUNNING = "running" + + cluster = models.ForeignKey( + "clusters.Cluster", on_delete=models.CASCADE, related_name="upgrades" + ) + release_name = models.CharField(max_length=200) + started_at = models.DateTimeField(auto_now_add=True, editable=False) + finished_at = models.DateTimeField(null=True, blank=True, editable=False) + triggered_by = models.CharField(max_length=200, null=True, 
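The muted_notifications handling in can_send_notification above is easiest to read with an example. A minimal sketch of what a cluster.alert_system_settings value might look like follows; the attribute names (namespace, pod, name, channel) and the "="/"~" operator semantics come from the code above, while the concrete values are invented.

# Hypothetical cluster.alert_system_settings value. Per match() above, a
# filter starting with "~" is a regex search, "=" (or no prefix) is a
# case-insensitive exact match, and an empty/missing filter matches anything.
alert_system_settings = {
    "muted_notifications": [
        {
            "namespace": "=dcw-dev123",    # exact namespace match
            "pod": "~^airflow-worker-.*",  # regex on the pod label
            "channel": "=slack",           # only mute Slack notifications
            # "name" omitted: matches any alert name
        }
    ]
}
# When every condition in an entry matches the alert,
# can_send_notification() returns False and the notification is muted.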
blank=True) + + def __str__(self): + return f"{self.cluster} - {self.started_at}" + + @property + def status(self): + """Returns finished if finished_at was set more than 5 minutes ago, running if the upgrade finished within + the last 5 minutes (so environment changes can propagate) or is still unfinished and started within the + last 30 minutes, and failed if it never finished and started more than 30 minutes ago. + """ + + if self.finished_at: + # if completed less than 5 minutes ago, wait until all env changes propagate + if self.finished_at > (timezone.now() - timedelta(minutes=5)): + return self.STATUS_RUNNING + else: + return self.STATUS_FINISHED + return ( + self.STATUS_RUNNING + if self.started_at > (timezone.now() - timedelta(minutes=30)) + else self.STATUS_FAILED + ) diff --git a/src/core/api/app/clusters/observability/__init__.py b/src/core/api/app/clusters/observability/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/clusters/observability/grafana/__init__.py b/src/core/api/app/clusters/observability/grafana/__init__.py new file mode 100644 index 00000000..96aae8fc --- /dev/null +++ b/src/core/api/app/clusters/observability/grafana/__init__.py @@ -0,0 +1 @@ +from .grafana import GrafanaApi # noqa diff --git a/src/core/api/app/clusters/observability/grafana/dashboards/airflow_dags_overview.json b/src/core/api/app/clusters/observability/grafana/dashboards/airflow_dags_overview.json new file mode 100644 index 00000000..468d0920 --- /dev/null +++ b/src/core/api/app/clusters/observability/grafana/dashboards/airflow_dags_overview.json @@ -0,0 +1,1143 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Airflow DAGs Overview", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "description": "", + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 30, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "
\n

${dag_id}

\n
\n", + "mode": "html" + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "refId": "A" + } + ], + "title": "DAG Selected", + "type": "text" + }, + { + "datasource": { + "default": false, + "type": "postgres", + "uid": "ds-airflow-db" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": true + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "run_id" + }, + "properties": [ + { + "id": "links", + "value": [ + { + "title": "Details", + "url": "/d/${__dashboard.uid}/${__dashboard}?orgId=${__org}&var-dag_id=$dag_id&var-run_id=${__data.fields.run_id:percentencode}" + } + ] + }, + { + "id": "custom.width", + "value": 341 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "state" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "options": { + "failed": { + "color": "semi-dark-red", + "index": 2 + }, + "queued": { + "color": "#808080", + "index": 3 + }, + "running": { + "color": "super-light-green", + "index": 1 + }, + "success": { + "color": "green", + "index": 0 + } + }, + "type": "value" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "duration" + }, + "properties": [ + { + "id": "unit", + "value": "s" + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 25, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "enablePagination": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "postgres", + "uid": "ds-airflow-db" + }, + "editorMode": "code", + "format": "table", + "rawQuery": true, + "rawSql": "SELECT dr.run_id, dr.execution_date, dr.start_date, dr.end_date, dr.run_type, dr.state, sum(ti.duration) \"duration\"\nFROM dag_run dr\nLEFT JOIN task_instance ti ON ti.dag_id=dr.dag_id AND ti.run_id=dr.run_id\nWHERE dr.dag_id = '$dag_id' AND\ndr.start_date BETWEEN $__timeFrom() AND $__timeTo()\nOR dr.end_date BETWEEN $__timeFrom() AND $__timeTo()\nGROUP BY (dr.id, dr.run_id, dr.execution_date, dr.start_date, dr.end_date, dr.run_type, dr.state)\nORDER BY dr.id DESC\nLIMIT 100", + "refId": "A", + "sql": { + "columns": [ + { + "parameters": [ + { + "name": "dag_id", + "type": "functionParameter" + } + ], + "type": "function" + } + ], + "groupBy": [ + { + "property": { + "type": "string" + }, + "type": "groupBy" + } + ], + "limit": 50 + }, + "table": "dag_run" + } + ], + "title": "DAG Runs", + "type": "table" + }, + { + "datasource": { + "default": false, + "type": "postgres", + "uid": "ds-airflow-db" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "state" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "options": { + "failed": { + "color": "semi-dark-red", + "index": 3 + }, + "queued": { + "color": "#b7bbc3", + "index": 1 + }, + "running": { + 
"color": "super-light-green", + "index": 2 + }, + "success": { + "color": "semi-dark-green", + "index": 0 + } + }, + "type": "value" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "task_id" + }, + "properties": [ + { + "id": "links", + "value": [ + { + "title": "", + "url": "/d/${__dashboard.uid}/${__dashboard}?orgId=${__org}&var-dag_id=$dag_id&var-run_id=${run_id:percentencode}&var-task_id=${__data.fields.task_id}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "duration" + }, + "properties": [ + { + "id": "unit", + "value": "s" + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "run_id" + }, + "properties": [ + { + "id": "custom.hidden", + "value": true + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "worker" + }, + "properties": [ + { + "id": "links", + "value": [ + { + "title": "", + "url": "/d/${__dashboard.uid}/${__dashboard}?orgId=${__org}&var-dag_id=$dag_id&var-run_id=${run_id:percentencode}&var-task_id=${__data.fields.task_id}&var-pod_name=${__data.fields.worker}" + } + ] + } + ] + } + ] + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 11 + }, + "id": 26, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "enablePagination": false, + "fields": [], + "reducer": [], + "show": true + }, + "showHeader": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "postgres", + "uid": "ds-airflow-db" + }, + "editorMode": "code", + "format": "table", + "rawQuery": true, + "rawSql": "SELECT task_id as task, hostname worker, state, run_id, start_date, end_date, duration as duration\nFROM task_instance\nWHERE dag_id='$dag_id' AND run_id='$run_id'\nORDER BY start_date ASC\nLIMIT 100", + "refId": "A", + "sql": { + "columns": [ + { + "parameters": [ + { + "name": "task_id", + "type": "functionParameter" + } + ], + "type": "function" + } + ], + "groupBy": [ + { + "property": { + "type": "string" + }, + "type": "groupBy" + } + ], + "limit": 50 + }, + "table": "task_instance" + } + ], + "title": "Task Instance [run_id=$run_id]", + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "description": "", + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 20 + }, + "id": 38, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "
\n

Run ID: $run_id

\n

Worker: $pod_name

\n
\n", + "mode": "html" + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "refId": "A" + } + ], + "title": "Worker Details", + "type": "text" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "memory limits" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "memory requets" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "memory usage" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 34, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "center", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": false + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(kube_pod_container_resource_limits{namespace=\"dcw-$env\", pod=\"$pod_name\", resource=\"memory\"})", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "memory limits", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(kube_pod_container_resource_requests{namespace=\"dcw-$env\", pod=\"$pod_name\", resource=\"memory\"})", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "memory requets", + "range": true, + "refId": "B" + } + ], + "title": "Memory Resources", + "type": "stat" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Limit" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + 
"matcher": { + "id": "byName", + "options": "Request" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 36, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum(container_memory_working_set_bytes{job=\"kubelet\", metrics_path=\"/metrics/cadvisor\", namespace=\"dcw-$env\", container!=\"\", image!=\"\", pod=\"$pod_name\"}) by (pod)", + "legendFormat": "{{pod}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "kube_pod_container_resource_limits{namespace=\"dcw-$env\", pod=\"$pod_name\", resource=\"memory\"}", + "hide": false, + "instant": false, + "legendFormat": "Limit", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "kube_pod_container_resource_requests{namespace=\"dcw-$env\", pod=\"$pod_name\", resource=\"memory\"}", + "hide": false, + "instant": false, + "legendFormat": "Request", + "range": true, + "refId": "C" + } + ], + "title": "Memory Usage", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "bytes" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "memory limits" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-red", + "mode": "fixed", + "seriesBy": "last" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "memory requets" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "semi-dark-yellow", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "memory usage" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "dark-green", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 0, + "y": 30 + }, + "id": 39, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "center", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": false + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(kube_pod_container_resource_limits{namespace=\"dcw-$env\", pod=\"$pod_name\", resource=\"cpu\"})", + "format": "time_series", + "hide": false, + "instant": false, + "legendFormat": "cpu limits", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "avg(kube_pod_container_resource_requests{namespace=\"dcw-$env\", pod=\"$pod_name\", resource=\"cpu\"})", + "format": "time_series", + "hide": false, + "instant": false, + 
"legendFormat": "cpu requets", + "range": true, + "refId": "B" + } + ], + "title": "CPU Resources", + "type": "stat" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Limit" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Request" + }, + "properties": [ + { + "id": "color", + "value": { + "fixedColor": "yellow", + "mode": "fixed" + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 12, + "x": 12, + "y": 30 + }, + "id": 40, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"dcw-$env\", pod=\"$pod_name\", container!=\"\"}[5m])) by (pod)", + "legendFormat": "{{pod}}", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "kube_pod_container_resource_limits{namespace=\"dcw-$env\", pod=\"$pod_name\", resource=\"cpu\"}", + "hide": false, + "instant": false, + "legendFormat": "Limit", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "kube_pod_container_resource_requests{namespace=\"dcw-$env\", pod=\"$pod_name\", resource=\"cpu\"}", + "hide": false, + "instant": false, + "legendFormat": "Request", + "range": true, + "refId": "C" + } + ], + "title": "CPU Usage", + "type": "timeseries" + }, + { + "datasource": { + "default": false, + "type": "loki", + "uid": "ds-loki" + }, + "gridPos": { + "h": 16, + "w": 24, + "x": 0, + "y": 36 + }, + "id": 37, + "options": { + "dedupStrategy": "none", + "enableLogDetails": false, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": false, + "sortOrder": "Ascending", + "wrapLogMessage": false + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "ds-loki" + }, + "editorMode": "code", + "expr": "{namespace=\"dcw-$env\", pod=\"$pod_name\"} |= ``", + "queryType": "range", + "refId": "A" + } + ], + "title": "Worker Logs", + "type": "logs" + } + ], + "refresh": "10s", + "schemaVersion": 39, + "tags": [ + "Airflow" + ], + "templating": { + "list": [ + { + "hide": 2, + "label": "Env", + "name": "env", + "query": "", + "skipUrlSync": false, + "type": 
"constant" + }, + { + "current": { + "isNone": true, + "selected": false, + "text": "None", + "value": "" + }, + "datasource": { + "type": "postgres", + "uid": "ds-airflow-db" + }, + "definition": "SELECT dag_id\nFROM dag_run\nWHERE execution_date BETWEEN $__timeFrom() AND $__timeTo()\nLIMIT 200", + "hide": 0, + "includeAll": false, + "label": "DAG", + "multi": false, + "name": "dag_id", + "options": [], + "query": "SELECT dag_id\nFROM dag_run\nWHERE execution_date BETWEEN $__timeFrom() AND $__timeTo()\nLIMIT 200", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 1, + "type": "query" + }, + { + "current": { + "isNone": true, + "selected": false, + "text": "None", + "value": "" + }, + "datasource": { + "type": "postgres", + "uid": "ds-airflow-db" + }, + "definition": "SELECT run_id\nFROM dag_run\nWHERE dag_id='$dag_id'\nORDER BY id DESC\nLIMIT 5", + "hide": 0, + "includeAll": false, + "label": "Run Id", + "multi": false, + "name": "run_id", + "options": [], + "query": "SELECT run_id\nFROM dag_run\nWHERE dag_id='$dag_id'\nORDER BY id DESC\nLIMIT 5", + "refresh": 2, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": false, + "text": "", + "value": "" + }, + "description": "", + "hide": 0, + "label": "Worker", + "name": "pod_name", + "options": [], + "query": "", + "skipUrlSync": false, + "type": "textbox" + } + ] + }, + "time": { + "from": "now-3h", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Airflow DAGs Overview", + "uid": null, + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/src/core/api/app/clusters/observability/grafana/dashboards/airflow_service.json b/src/core/api/app/clusters/observability/grafana/dashboards/airflow_service.json new file mode 100644 index 00000000..b7c6297c --- /dev/null +++ b/src/core/api/app/clusters/observability/grafana/dashboards/airflow_service.json @@ -0,0 +1,1914 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "description": "Airflow Service", + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 58, + "panels": [], + "title": "Airflow", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 1 + }, + "id": 7, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "center", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) 
(airflow_executor_open_slots{namespace=\"dcw-$env\"} + airflow_executor_running_tasks{namespace=\"dcw-$env\"})", + "hide": false, + "legendFormat": "Total", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (airflow_executor_open_slots{namespace=\"dcw-$env\"})", + "legendFormat": "Available", + "range": true, + "refId": "A" + } + ], + "title": "Task Slots Available", + "type": "stat" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "decimals": 0, + "mappings": [ + { + "options": { + "from": 0, + "result": { + "color": "green", + "index": 0 + }, + "to": 79 + }, + "type": "range" + }, + { + "options": { + "from": 80, + "result": { + "color": "yellow", + "index": 1 + }, + "to": 94 + }, + "type": "range" + }, + { + "options": { + "from": 95, + "result": { + "color": "red", + "index": 2 + }, + "to": 100 + }, + "type": "range" + } + ], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 80 + }, + { + "color": "red", + "value": 95 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 1 + }, + "id": 44, + "options": { + "minVizHeight": 75, + "minVizWidth": 75, + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true, + "sizing": "auto" + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (airflow_executor_running_tasks{namespace=\"dcw-$env\"} / (airflow_executor_open_slots{namespace=\"dcw-$env\"} + airflow_executor_running_tasks{namespace=\"dcw-$env\"})) * 100", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Busy Task Slots", + "type": "gauge" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 90 + }, + { + "color": "red", + "value": 120 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 1 + }, + "id": 17, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum(airflow_dagbag_size{namespace=\"dcw-$env\"}) by (namespace)", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "DAG Bag Size", + "type": "stat" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + 
"value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 1 + }, + "id": 18, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "percentChangeColorMode": "standard", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showPercentChange": false, + "textMode": "auto", + "wideLayout": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (airflow_dag_processing_import_errors{namespace=\"dcw-$env\"})", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "DAGs Import Errors", + "type": "stat" + }, + { + "datasource": { + "default": false, + "type": "postgres", + "uid": "ds-airflow-db" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "run_id" + }, + "properties": [ + { + "id": "links", + "value": [ + { + "title": "", + "url": "/d/airflow-dags-overview-${env}/Airflow%20DAGs%20Overview?orgId=${__org}&var-dag_id=${__data.fields.dag_id:percentencode}&var-run_id=${__data.fields.run_id:percentencode}" + } + ] + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "state" + }, + "properties": [ + { + "id": "mappings", + "value": [ + { + "options": { + "success": { + "color": "green", + "index": 0 + } + }, + "type": "value" + } + ] + } + ] + } + ] + }, + "gridPos": { + "h": 14, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 66, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "enablePagination": true, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "postgres", + "uid": "ds-airflow-db" + }, + "editorMode": "code", + "format": "table", + "rawQuery": true, + "rawSql": "SELECT dag_id, run_id, state, execution_date, start_date, end_date, end_date - start_date as duration\nFROM dag_run\nWHERE start_date BETWEEN $__timeFrom() AND $__timeTo()\nOR end_date BETWEEN $__timeFrom() AND $__timeTo()\nORDER BY start_date DESC\nLIMIT 200", + "refId": "A", + "sql": { + "columns": [ + { + "parameters": [], + "type": "function" + } + ], + "groupBy": [ + { + "property": { + "type": "string" + }, + "type": "groupBy" + } + ], + "limit": 50 + } + } + ], + "title": "Dag Run", + "type": "table" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 5, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 
false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 21 + }, + "id": 9, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (airflow_executor_open_slots{namespace=\"dcw-$env\"})", + "hide": false, + "legendFormat": "open slots", + "range": true, + "refId": "C" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (airflow_executor_running_tasks{namespace=\"dcw-$env\"})", + "legendFormat": "running", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (airflow_executor_queued_tasks{namespace=\"dcw-$env\"})", + "hide": false, + "legendFormat": "queued", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (rate(airflow_scheduler_tasks_killed_externally{namespace=\"dcw-$env\"}[$__rate_interval]))", + "hide": false, + "legendFormat": "killed", + "range": true, + "refId": "D" + } + ], + "title": "Tasks Slots Over Time", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 5, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 21 + }, + "id": 4, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "rate(airflow_scheduler_heartbeat{namespace=\"dcw-$env\"}[1m])*60", + "legendFormat": "{{pod}}", + "range": true, + "refId": "A" + } + ], + "title": "Scheduler Hearbeat per Minute", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + 
"axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 5, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 29 + }, + "id": 20, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum(round(increase(airflow_ti_successes{namespace=\"dcw-$env\"}[$__rate_interval]))) by (namespace)", + "legendFormat": "successes", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum(round(increase(airflow_ti_start{namespace=\"dcw-$env\"}[$__rate_interval]))) by (namespace)", + "hide": false, + "legendFormat": "started", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum(round(increase(airflow_ti_finish{namespace=\"dcw-$env\"}[$__rate_interval]))) by (namespace)", + "hide": false, + "instant": false, + "legendFormat": "finished", + "range": true, + "refId": "C" + } + ], + "title": "Tasks Instances", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 29 + }, + "id": 62, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "airflow_dag_processing_last_duration_sync_airflow_db{namespace=\"dcw-$env\", quantile=\"0.99\"}", + "instant": false, + "legendFormat": 
"__auto", + "range": true, + "refId": "A" + } + ], + "title": "DAGs Sync Database", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "description": "Executable: Number of tasks that are ready for execution (set to queued) with respect to pool limits, DAG concurrency, executor state, and priority.\nStarving: Number of tasks that cannot be scheduled because of no open slot in pool.\nRunning: Number of tasks running on scheduler.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 5, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 37 + }, + "id": 22, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (airflow_scheduler_tasks_executable{namespace=\"dcw-$env\"})", + "legendFormat": "executable", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (airflow_scheduler_tasks_starving{namespace=\"dcw-$env\"})", + "hide": false, + "legendFormat": "starving", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (airflow_scheduler_tasks_running{namespace=\"dcw-$env\"})", + "hide": false, + "legendFormat": "running", + "range": true, + "refId": "C" + } + ], + "title": "Scheduler Tasks", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + 
"overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 37 + }, + "id": 52, + "options": { + "legend": { + "calcs": [], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum(round(increase(airflow_operator_successes{namespace=\"dcw-$env\", operator!=\"\"}[$__rate_interval]))) by (namespace, operator)", + "hide": false, + "instant": false, + "legendFormat": "{{operator}}", + "range": true, + "refId": "B" + } + ], + "title": "Operator Successes Rate", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 45 + }, + "id": 64, + "panels": [], + "title": "Dag Processing", + "type": "row" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "description": "Seconds taken to scan and import all DAG files once", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 5, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 46 + }, + "id": 48, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum(airflow_dag_processing_total_parse_time{namespace=\"dcw-$env\"}) by (namespace)", + "legendFormat": "{{pod}}", + "range": true, + "refId": "A" + } + ], + "title": "DAGs File processing Time", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "description": "Number of errors from trying to parse DAG files", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "red", + "mode": "fixed" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red" + } + ] + }, + 
"unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 46 + }, + "id": 42, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "sum by (namespace) (airflow_dag_processing_import_errors{namespace=\"dcw-$env\"})", + "legendFormat": "{{pod}}", + "range": true, + "refId": "A" + } + ], + "title": "DAGs File Parsing Errors", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "ms" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "DAG" + }, + "properties": [ + { + "id": "custom.width", + "value": 928 + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 54 + }, + "id": 63, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "topk(10, label_replace({__name__=~\"airflow_dag_processing_last_duration_.*\", quantile=\"0.99\", namespace=\"dcw-$env\", __name__!~\".*(_sum|_count)$\"}, \"dag_name\", \"$1\", \"__name__\", \"airflow_dag_processing_last_duration_(.+)\"))", + "format": "table", + "instant": true, + "legendFormat": "__auto", + "range": false, + "refId": "A" + } + ], + "title": "DAGs Parsing (Top 10)", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "__name__": true, + "container": true, + "endpoint": true, + "instance": true, + "job": true, + "namespace": true, + "pod": true, + "prometheus": true, + "prometheus_replica": true, + "quantile": true, + "service": true + }, + "includeByName": {}, + "indexByName": {}, + "renameByName": { + "Value": "Duration", + "dag_name": "DAG", + "service": "" + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 62 + }, + "id": 59, + "panels": [], + "title": "Airflow Database", + "type": "row" + }, + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + 
"mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 0, + "y": 63 + }, + "id": 56, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "11.2.2", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "pgbouncer_exporter_pools_active_clients{database=\"$env-airflow-metadata\"}", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Database Pool Active Clients", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 8, + "y": 63 + }, + "id": 55, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "expr": "pgbouncer_exporter_database_current_connections{name=\"$env-airflow-metadata\"}", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Database Current Connections", + "type": "timeseries" + }, + { + "datasource": { + "default": true, + "type": "prometheus", + "uid": "ds-prometheus" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 10, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "fieldMinMax": false, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 7, + "w": 8, + "x": 16, + "y": 63 + }, + "id": 57, + "options": 
{ + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "ds-prometheus" + }, + "editorMode": "code", + "exemplar": false, + "expr": "rate(pgbouncer_exporter_stats_total_query_count{database=\"$env-airflow-metadata\"}[5m])", + "instant": false, + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Database Total Queries", + "type": "timeseries" + }, + { + "collapsed": false, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 70 + }, + "id": 60, + "panels": [], + "title": "Logs", + "type": "row" + }, + { + "datasource": { + "type": "loki", + "uid": "ds-loki" + }, + "gridPos": { + "h": 15, + "w": 24, + "x": 0, + "y": 71 + }, + "id": 50, + "options": { + "dedupStrategy": "none", + "enableLogDetails": false, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": false, + "sortOrder": "Ascending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "ds-loki" + }, + "editorMode": "builder", + "expr": "{namespace=\"dcw-$env\", component=\"scheduler\"}", + "queryType": "range", + "refId": "A" + } + ], + "title": "Scheduler Logs", + "type": "logs" + }, + { + "datasource": { + "default": false, + "type": "loki", + "uid": "ds-loki" + }, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 86 + }, + "id": 65, + "options": { + "dedupStrategy": "none", + "enableLogDetails": false, + "prettifyLogMessage": false, + "showCommonLabels": false, + "showLabels": false, + "showTime": false, + "sortOrder": "Ascending", + "wrapLogMessage": true + }, + "targets": [ + { + "datasource": { + "type": "loki", + "uid": "ds-loki" + }, + "editorMode": "builder", + "expr": "{namespace=\"dcw-$env\", component=\"scheduler\"} |= `ERROR`", + "queryType": "range", + "refId": "A" + } + ], + "title": "Scheduler Logs Errors", + "type": "logs" + } + ], + "refresh": "10s", + "schemaVersion": 39, + "tags": [ + "Airflow" + ], + "templating": { + "list": [ + { + "hide": 2, + "label": "Env", + "name": "env", + "query": "", + "skipUrlSync": false, + "type": "constant" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "Airflow Service", + "uid": null, + "version": 1, + "weekStart": "" +} \ No newline at end of file diff --git a/src/core/api/app/clusters/observability/grafana/grafana.py b/src/core/api/app/clusters/observability/grafana/grafana.py new file mode 100644 index 00000000..52ed158a --- /dev/null +++ b/src/core/api/app/clusters/observability/grafana/grafana.py @@ -0,0 +1,423 @@ +import json +from typing import Optional + +import requests +from django.conf import settings +from projects.models.environment import Environment + + +class GrafanaApi: + def __init__(self, enviroment: Environment): + self.env = enviroment + self.namespace_observability_stack = "prometheus" + + def _normalize_name(self, value: str): + return value.replace(" ", "-").lower() + + def create_basic_config(self): + if not self._health_check(): + raise Exception( + "Could not access Grafana, please check if the Grafana service is running successfully." 
+ ) + + if self.env.grafana_config.get("credentials") is None: + self._create_service_account() + + self._create_datasources() + self._create_folder() + self._load_dashboards() + + Environment.objects.filter(id=self.env.id).update( + grafana_config=self.env.grafana_config + ) + + def _health_check(self) -> bool: + base_url = self._base_url() + r = requests.get(f"{base_url}/health") + return r.ok and r.json()["database"] == "ok" + + def _base_url(self, add_admin_user=False) -> str: + """Grafana get base url to admin user or service account""" + if add_admin_user: + user = self.env.cluster.service_account["grafana"]["username"] + password = self.env.cluster.service_account["grafana"]["password"] + + return ( + f"http://{user}:{password}@prometheus-grafana." + f"{self.namespace_observability_stack}.svc.cluster.local/api" + ) + + return f"http://prometheus-grafana.{self.namespace_observability_stack}.svc.cluster.local/api" + + def _headers(self, add_bearer_token=True) -> dict: + headers = {"Content-Type": "application/json; charset=utf-8"} + if add_bearer_token: + try: + token = self.env.grafana_config["credentials"]["service_account"][ + "token" + ]["key"] + headers.update({"Authorization": f"Bearer {token}"}) + except KeyError: + raise Exception("Grafana service account does not exist.") + + return headers + + def _get_org_by_name(self, org_name: str): + base_url = self._base_url(add_admin_user=True) + r = requests.get(f"{base_url}/orgs/name/{org_name}") + if not r.ok: + raise Exception(f"Grafana organization does not exist: {org_name} {r.text}") + + return r.json() + + def _set_org_by_name(self, org_name: str) -> bool: + base_url = self._base_url(add_admin_user=True) + headers = self._headers(add_bearer_token=False) + org = self._get_org_by_name(org_name=org_name) + r = requests.post(f"{base_url}/user/using/{org['id']}", headers=headers) + if r.ok: + return True + + raise Exception( + f"Grafana could no switch the organization: {org_name} {r.text}" + ) + + def _create_service_account(self): + base_url = self._base_url(add_admin_user=True) + headers = self._headers(add_bearer_token=False) + + account_slug = self.env.account.slug + self._set_org_by_name(org_name=account_slug) + service_account_name = f"{self.env.slug}-admin" + + # Delete old service accounts + r = requests.get( + url=f"{base_url}/serviceaccounts/search?perpage=10&page=1&query={service_account_name}" + ) + if r.ok: + result = r.json() + if result["totalCount"] > 0: + for sa in result["serviceAccounts"]: + sa_id = sa["id"] + requests.delete(url=f"{base_url}/serviceaccounts/{sa_id}") + + else: + raise Exception( + f"Grafana error getting service account: {service_account_name}" + ) + + r = requests.post( + url=f"{base_url}/serviceaccounts", + headers=headers, + json={"name": service_account_name, "role": "Admin"}, + ) + + if r.ok: + # Create a Service Account token for the service account created in the previous step + service_account = r.json() + r = requests.post( + url=f"{base_url}/serviceaccounts/{service_account['id']}/tokens", + headers=headers, + json={"name": self.env.slug}, + ) + + if r.ok: + self.env.grafana_config.update( + { + "credentials": { + "service_account": { + "id": service_account["id"], + "name": service_account["name"], + "token": r.json(), + } + } + } + ) + + else: + raise Exception( + f"Grafana error creating service account token: {r.text}" + ) + + else: + raise Exception(f"Grafana error creating service account: {r.text}") + + # Set the main organization + 
self._set_org_by_name(org_name="datacoves-main") + + def _create_folder(self): + base_url = self._base_url() + headers = self._headers() + + folder_name = self._normalize_name(f"env-{self.env.slug}") + folder_uid = self.env.slug + data = None + + # Validate if folder exists + r = requests.get(url=f"{base_url}/folders/{folder_uid}", headers=headers) + if r.ok: + data = r.json() + + else: + r = requests.post( + f"{base_url}/folders", + headers=headers, + json={"title": folder_name, "uid": folder_uid}, + ) + if r.ok: + data = r.json() + + if data: + self.env.grafana_config.update( + { + "folder": { + "id": data["id"], + "uid": data["uid"], + "title": data["title"], + } + } + ) + + else: + raise Exception(f"Grafana error creating folder {folder_name}: {r.text}") + + def _get_datasource_by_uid(self, uid: str) -> Optional[dict]: + base_url = self._base_url() + headers = self._headers() + + r = requests.get(url=f"{base_url}/datasources/uid/{uid}", headers=headers) + if r.ok: + data = r.json() + return {"name": data["name"], "uid": data["uid"]} + + return None + + def _delete_datasource_by_uid(self, uid: str) -> bool: + base_url = self._base_url() + headers = self._headers() + r = requests.delete(url=f"{base_url}/datasources/uid/{uid}", headers=headers) + return r.ok + + def _create_datasource(self, payload: dict): + """Grafana create or update datasource by environment""" + base_url = self._base_url() + headers = self._headers() + + ds_name = payload["name"] + ds_uid = payload["uid"] + + r = requests.post( + url=f"{base_url}/datasources", + headers=headers, + json=payload, + ) + + if not r.ok: + raise Exception(f"Grafana error creating datasource {ds_name}: {r.text}") + + return {"name": ds_name, "uid": ds_uid} + + def _create_datasource_prometheus_mimir(self): + """Loki datadource""" + + ds_name = f"Prometheus mimir {self.env.slug}" + + # It was already created + key_config = "prometheus" + ds = self.env.grafana_config.get("datasources", {}).get(key_config) + if ds: + return {key_config: ds} + + # Datasource already exists + ds_uid = f"prometheus-{self.env.slug}" + ds = self._get_datasource_by_uid(uid=ds_uid) + if ds: + self._delete_datasource_by_uid(uid=ds_uid) + + payload = { + "uid": ds_uid, + "name": ds_name, + "type": "prometheus", + "typeName": "Prometheus", + "access": "proxy", + "url": "http://mimir-nginx/prometheus", + "isDefault": True, + "basicAuth": False, + "jsonData": {"httpHeaderName1": "X-Scope-OrgID", "timeout": 300}, + "secureJsonData": {"httpHeaderValue1": self.env.k8s_namespace}, + } + + ds = self._create_datasource(payload=payload) + if ds: + return {key_config: ds} + + raise Exception(f"Failed to create datasource: {ds_name}") + + def _create_datasource_loki(self): + """Loki datadource""" + + ds_name = f"Loki {self.env.slug}" + # It was already created + key_config = "loki" + ds = self.env.grafana_config.get("datasources", {}).get(key_config) + if ds: + return {key_config: ds} + + # Datasource already exists + ds_uid = f"loki-{self.env.slug}" + ds = self._get_datasource_by_uid(uid=ds_uid) + if ds: + self._delete_datasource_by_uid(uid=ds_uid) + + payload = { + "uid": ds_uid, + "name": ds_name, + "type": "loki", + "typeName": "Loki", + "access": "proxy", + "url": "http://loki-loki-distributed-gateway", + "basicAuth": False, + "jsonData": {"httpHeaderName1": "X-Scope-OrgID", "timeout": 300}, + "secureJsonData": {"httpHeaderValue1": self.env.k8s_namespace}, + } + + ds = self._create_datasource(payload=payload) + if ds: + return {key_config: ds} + + raise Exception(f"Failed to 
create datasource: {ds_name}") + + def _create_datadource_airflow_db(self): + """Airflow database datasource""" + + ds_name = f"Airflow database {self.env.slug}" + + # It was already created + key_config = "airflow_db" + ds = self.env.grafana_config.get("datasources", {}).get(key_config) + if ds: + return {key_config: ds} + + db_config = self.env.airflow_config["db"] + if ( + "host" in db_config + and "user" in db_config + and "database" in db_config + and "password" in db_config + ): + pgbouncer_enabled = self.env.airflow_config.get("pgbouncer", {}).get( + "enabled", False + ) + sslmode = db_config.get( + "sslmode", + "disable" + if pgbouncer_enabled or not self.env.cluster.is_local + else "require", + ) + host = ( + f"{self.env.slug}-airflow-pgbouncer.{self.env.k8s_namespace}" + if pgbouncer_enabled + else db_config["host"] + ) + port = 6543 if pgbouncer_enabled else db_config.get("port", 5432) + database = ( + f"{self.env.slug}-airflow-metadata" + if pgbouncer_enabled + else db_config["database"] + ) + + # Datasource already exists + ds_uid = f"airflow-db-{self.env.slug}" + ds = self._get_datasource_by_uid(uid=ds_uid) + if ds: + self._delete_datasource_by_uid(uid=ds_uid) + + payload = { + "uid": ds_uid, + "name": ds_name, + "type": "postgres", + "url": f"{host}:{port}", + "access": "proxy", + "basicAuth": True, + "user": db_config["user"], + "database": database, + "jsonData": {"sslmode": sslmode}, + "secureJsonData": {"password": db_config["password"]}, + } + + ds = self._create_datasource(payload=payload) + if ds: + return {"airflow_db": ds} + + raise Exception(f"Failed to create datasource: {ds_name}") + + def _create_datasources(self): + """Grafana create or update datasource by environment""" + datasources = self.env.grafana_config.get("datasources", {}) + datasources.update(self._create_datasource_prometheus_mimir()) + datasources.update(self._create_datasource_loki()) + + if self.env.is_service_enabled_and_valid(settings.SERVICE_AIRFLOW): + datasources.update(self._create_datadource_airflow_db()) + + # Datasources + self.env.grafana_config.update({"datasources": datasources}) + + def _load_dashboards(self): + """Grafana load dashboard by environment""" + folder_id = self.env.grafana_config.get("folder", {}).get("id") + if folder_id is None: + raise Exception(f"Grafana folder does not exist for {self.env.slug}") + + base_url = self._base_url() + headers = self._headers() + + path_list = settings.BASE_DIR / "clusters/observability/grafana/dashboards" + for path in path_list.glob("**/*.json"): + with open(path, "r") as f: + if not self.env.is_service_enabled_and_valid( + settings.SERVICE_AIRFLOW + ) and f.name.lower().startswith("airflow"): + continue + + dashboard = None + dashboard = f.read() + + # Update datasources + dashboard = dashboard.replace( + "ds-airflow-db", f"airflow-db-{self.env.slug}" + ) + dashboard = dashboard.replace( + "ds-prometheus", f"prometheus-{self.env.slug}" + ) + dashboard = dashboard.replace("ds-loki", f"loki-{self.env.slug}") + + dashboard = json.loads(dashboard) + dashboard["tags"].extend(["Datacoves", self.env.slug.upper()]) + dashboard["uid"] = self._normalize_name( + f"{dashboard['title']}-{self.env.slug}" + ) + dashboard["id"] = None + + for var in dashboard["templating"]["list"]: + if var["name"] == "env": + var["query"] = self.env.slug + + payload = { + "dashboard": dashboard, + "folderId": folder_id, + "message": f"Changes made by workspace {self.env.workspace_generation}", + "overwrite": True, + } + + r = requests.delete( + 
url=f"{base_url}/dashboards/uid/{dashboard['uid']}", headers=headers + ) + r = requests.post( + url=f"{base_url}/dashboards/db", headers=headers, json=payload + ) + + if not r.ok: + raise Exception( + f"Grafana dashboard {dashboard['title']} could not be created or updated: {r.text}" + ) diff --git a/src/core/api/app/clusters/prometheus.py b/src/core/api/app/clusters/prometheus.py new file mode 100644 index 00000000..6248ca9a --- /dev/null +++ b/src/core/api/app/clusters/prometheus.py @@ -0,0 +1,87 @@ +import requests +from django.conf import settings + +from lib.utils import day_interval_until_now + +### get_by_label_pods_running_day_total_seconds ### + + +def get_by_label_pods_running_day_total_seconds( + day, namespace, label, pattern, container=None +): + # The kube_pod_container_status_running metric doesn't have prometheus + # labels that let us filter by k8s labels. Only the kube_pod_labels does, + # so we must do a join by pod uid on the two series to filter by k8s label. + # We do two queries, one for each of those metrics, scoped to the namespace + # and do the join in python. We might be able to get it to a single query + # by doing the join in prometheus (left_group?). + t_a, t_b = day_interval_until_now(day) + pods = get_pods_labeled(namespace, label, pattern, t_a, t_b) + pod_uids = {pod["uid"] for pod in pods} + containers_running_seconds = query( + f""" + sum_over_time(kube_pod_container_status_running{{ + namespace='{namespace}', + }}[{int(t_b - t_a)}s:1s]) + """, + time=t_b, + ) + total = 0.0 + for container_series in containers_running_seconds["result"]: + metric = container_series["metric"] + pod_uid = metric["uid"] + value = float(container_series["value"][1]) + if pod_uid in pod_uids: + total += value + return total + + +def get_pods_labeled(namespace, label, pattern, t_a, t_b): + """ + Query prometheus for pods with a given label and a label value matching the + pattern. Results are constrained to the time interval given. + """ + assert t_a < t_b + assert pattern not in ("", ".*"), "pattern too broad, would match all pods" + label = label.replace("-", "_") + response = query( + f""" + last_over_time(kube_pod_labels{{ + namespace='{namespace}', + label_{label}=~'{pattern}' + }}[{int(t_b - t_a)}s:1s]) + """, + time=t_b, + ) + return [ + {"uid": x["metric"]["uid"], "pod": x["metric"]["pod"]} + for x in response["result"] + ] + + +### query ### + + +log_query = None +log_query_response = None + + +def query(q, **params): + """Run a prometheus query.""" + params["query"] = q + if log_query: + log_query(params) + response = requests.get(f"{settings.PROMETHEUS_API_URL}/query", params=params) + # TODO: Make sure the response from 4xx responses make it to sentry. 
+ response.raise_for_status() + msg = response.json() + if msg.get("status") != "success": + raise Exception( + error_type=msg.get("error_type"), + error=msg.get("error"), + warnings=msg.get("warnings"), + ) + data = msg["data"] + if log_query_response: + log_query_response(data) + return data diff --git a/src/core/api/app/clusters/request_utils.py b/src/core/api/app/clusters/request_utils.py new file mode 100644 index 00000000..a4da7dcf --- /dev/null +++ b/src/core/api/app/clusters/request_utils.py @@ -0,0 +1,50 @@ +from projects.models.environment import Environment + +from .models import Cluster + +NAMESPACE_PREFIX = "dcw-" + + +def get_cluster(request) -> Cluster: + api_host = request.META["HTTP_HOST"] + domain = api_host.replace("api.", "") + try: + return Cluster.objects.get(domain=domain) + except Cluster.DoesNotExist: + return Cluster.objects.first() + + +def get_services_resources( + env: Environment, + services=["webserver", "workers", "statsd", "scheduler", "triggerer"], +): + """ + Returns resources requests and limits for a list of services from k8s specs + """ + resources = {} + + def get_service_resource(ns_deployment_item): + container = ns_deployment_item.spec.template.spec.containers[0] + if container.name in services: + limits = container.resources.__dict__["_limits"] + requests = container.resources.__dict__["_requests"] + if limits and requests: + return { + container.name: { + "limits": {"cpu": limits["cpu"], "memory": limits["memory"]}, + "requests": { + "cpu": requests["cpu"], + "memory": requests["memory"], + }, + } + } + return {} + + if env.cluster.defines_resource_requests: + kc = env.cluster.kubectl + ns_deployments = kc.AppsV1Api.list_namespaced_deployment( + f"{NAMESPACE_PREFIX}{env.slug}" + ) + for item in ns_deployments.items: + resources.update(get_service_resource(item)) + return resources diff --git a/src/core/api/app/clusters/signals.py b/src/core/api/app/clusters/signals.py new file mode 100644 index 00000000..1a9a931a --- /dev/null +++ b/src/core/api/app/clusters/signals.py @@ -0,0 +1,404 @@ +import logging +import re + +import requests +from billing.models import Plan +from codegen.models import Template +from django.contrib.auth.models import Permission +from django.db.models import Q +from django.db.models.signals import m2m_changed, post_delete, post_save +from django.dispatch import receiver +from integrations.models import Integration +from projects.models import ( + ConnectionTemplate, + Environment, + EnvironmentIntegration, + Profile, + ProfileFile, + Project, + ServiceCredential, + UserCredential, + UserEnvironment, + UserRepository, +) +from users.models import Account, Group, User + +from lib.utils import get_pending_tasks, m2m_changed_subjects_and_objects + +from . 
import workspace +from .models import Cluster + +logger = logging.getLogger(__name__) + + +@receiver( + post_save, sender=Environment, dispatch_uid="clusters.handle_environment_post_save" +) +def handle_environment_post_save(sender, **kwargs): + env = kwargs["instance"] + workspace.sync(env, "signals.handle_environment_post_save") + + +@receiver( + post_delete, + sender=Environment, + dispatch_uid="clusters.handle_environment_post_delete", +) +def handle_environment_post_delete(sender, **kwargs): + env = kwargs["instance"] + if env.workspace_generation: + workspace.delete(env) + + +@receiver(post_save, sender=Cluster, dispatch_uid="clusters.handle_cluster_post_save") +def handle_cluster_post_save(sender, **kwargs): + cluster = kwargs["instance"] + + if not kwargs["created"]: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env in cluster.environments.all(): + workspace.sync( + env, "signals.handle_cluster_post_save", pending_tasks=pending_tasks + ) + + +@receiver(post_save, sender=Project, dispatch_uid="clusters.handle_project_post_save") +def handle_project_post_save(sender, **kwargs): + project = kwargs["instance"] + if not kwargs["created"]: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env in project.environments.all(): + workspace.sync( + env, "signals.handle_project_post_save", pending_tasks=pending_tasks + ) + + +def sync_grafana_orgs(): + """ + Creates/updates orgs in grafana based on accounts + """ + # Creating orgs on grafana + cluster = Cluster.objects.current().only("id", "service_account").first() + + # For unit tests, cluster may be None here + if cluster is None: + return + + user = cluster.service_account.get("grafana", {}).get("username") + if not user: + logger.info("Grafana not configured on cluster") + return + + password = cluster.service_account.get("grafana", {}).get("password") + base_url = ( + f"http://{user}:{password}@prometheus-grafana.prometheus.svc.cluster.local/api" + ) + orgs = [account.slug for account in Account.objects.active_accounts()] + for org in orgs: + r = requests.post( + f"{base_url}/orgs", + headers={"Content-Type": "application/json; charset=utf-8"}, + json={"name": org}, + verify=False, + ) + if r.ok: + logger.info(f"Organization {org} created") + else: + if r.json()["message"] != "Organization name taken": + logger.error(f"Organization {org} creation error: {r.text}") + + # Reconfiguring grafana + kc = cluster.kubectl + cm = kc.CoreV1Api.read_namespaced_config_map("prometheus-grafana", "prometheus") + account_perms = " ".join([f"{org}:{org}:Viewer" for org in orgs]) + cm.data = { + "grafana.ini": re.sub( + r"org_mapping = (.*)\n", + f"org_mapping = {account_perms}\n", + cm.data["grafana.ini"], + ) + } + kc.CoreV1Api.replace_namespaced_config_map("prometheus-grafana", "prometheus", cm) + # Restarting grafana + kc.restart_deployment("prometheus-grafana", "prometheus") + + +@receiver(post_save, sender=Account, dispatch_uid="clusters.handle_account_post_save") +def handle_account_post_save(sender, **kwargs): + account = kwargs["instance"] + if not kwargs["created"]: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env in Environment.objects.filter(project__account=account).distinct(): + workspace.sync( + env, "signals.handle_account_post_save", pending_tasks=pending_tasks + ) + else: + sync_grafana_orgs() + + +@receiver(post_save, sender=User, dispatch_uid="clusters.handle_user_post_save") +def handle_user_post_save(sender, **kwargs): + user = kwargs["instance"] + if not 
kwargs["created"] and not user.is_service_account: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env in user.environments: + extra_params = { + "env_slug": env.slug, + "user_email": user.email, + "user_slug": user.slug, + } + workspace.sync( + env, + "signals.handle_user_post_save", + pending_tasks=pending_tasks, + **extra_params, + ) + + +@receiver( + post_save, + sender=UserEnvironment, + dispatch_uid="clusters.handle_user_environment_post_save", +) +def handle_user_environment_post_save(sender, **kwargs): + ue = kwargs["instance"] + # To monitor tasks + extra_params = { + "env_slug": ue.environment.slug, + "user_email": ue.user.email, + "user_slug": ue.user.slug, + } + + workspace.sync( + ue.environment, "signals.handle_user_environment_post_save", **extra_params + ) + + # This accelerates the code-servers wake up process by changing K8s resources immediately + workspace.sync_user_environment(ue) + + +@receiver( + post_save, + sender=EnvironmentIntegration, + dispatch_uid="clusters.handle_environment_integration_post_save", +) +def handle_environment_integration_post_save(sender, **kwargs): + ei = kwargs["instance"] + workspace.sync(ei.environment, "signals.handle_environment_integration_post_save") + + +@receiver( + post_delete, + sender=UserEnvironment, + dispatch_uid="clusters.handle_user_environment_post_delete", +) +def handle_user_environment_post_delete(sender, **kwargs): + ue = kwargs["instance"] + extra_params = { + "env_slug": ue.environment.slug, + "user_email": ue.user.email, + "user_slug": ue.user.slug, + } + workspace.sync( + ue.environment, "signals.handle_user_environment_post_delete", **extra_params + ) + + +@receiver( + post_save, + sender=ConnectionTemplate, + dispatch_uid="clusters.handle_connection_post_save", +) +def handle_connection_post_save(sender, **kwargs): + connection_template = kwargs["instance"] + if not kwargs["created"]: + envs = list( + Environment.objects.filter( + Q(service_credentials__connection_template=connection_template) + | Q(user_credentials__connection_template=connection_template) + ).distinct() + ) + if envs: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env in envs: + workspace.sync( + env, + "signals.handle_connection_post_save", + pending_tasks=pending_tasks, + ) + + +@receiver( + post_save, + sender=ServiceCredential, + dispatch_uid="clusters.handle_service_credential_post_save", +) +def handle_service_credential_post_save(sender, **kwargs): + credential = kwargs["instance"] + workspace.sync( + credential.environment, "signals.handle_service_credential_post_save" + ) + + +@receiver( + post_save, + sender=UserCredential, + dispatch_uid="clusters.handle_user_credential_post_save", +) +def handle_user_credential_post_save(sender, **kwargs): + credential = kwargs["instance"] + workspace.sync(credential.environment, "signals.handle_user_credential_post_save") + + +@receiver( + post_delete, + sender=UserCredential, + dispatch_uid="clusters.handle_user_credential_post_delete", +) +def handle_user_credential_post_delete(sender, **kwargs): + credential = kwargs["instance"] + workspace.sync(credential.environment, "signals.handle_user_credential_post_delete") + + +@receiver( + m2m_changed, + sender=User.groups.through, + dispatch_uid="clusters.handle_user_groups_changed", +) +def handle_user_groups_changed(sender, **kwargs): + _, group_pks = m2m_changed_subjects_and_objects(kwargs) + action = kwargs["action"] + if action in ("post_remove", "post_add"): + permissions_granted_by_groups = 
Permission.objects.filter( + group__in=group_pks, + name__contains="|workbench:", + ).values_list("name", flat=True) + envs = Environment.from_permission_names(permissions_granted_by_groups) + if envs: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env in envs: + workspace.sync( + env, + "signals.handle_user_groups_changed", + pending_tasks=pending_tasks, + ) + + +@receiver( + m2m_changed, + sender=Group.permissions.through, + dispatch_uid="clusters.handle_group_permissions_changed", +) +def handle_group_permissions_changed(sender, **kwargs): + _, permission_pks = m2m_changed_subjects_and_objects(kwargs) + action = kwargs["action"] + if action in ("post_remove", "post_add"): + names = Permission.objects.filter( + pk__in=permission_pks, + name__contains="|workbench:", + ).values_list("name", flat=True) + envs = Environment.from_permission_names(names) + if envs: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env in envs: + workspace.sync( + env, + "signals.handle_group_permissions_changed", + pending_tasks=pending_tasks, + ) + + +@receiver( + post_save, + sender=UserRepository, + dispatch_uid="clusters.handle_user_repository_post_save", +) +def handle_user_repository_post_save(sender, **kwargs): + user_repo = kwargs["instance"] + if not kwargs["created"]: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env in Environment.objects.filter( + project__repository=user_repo.repository + ).distinct(): + workspace.sync( + env, + "signals.handle_user_repository_post_save", + pending_tasks=pending_tasks, + ) + + +@receiver(post_save, sender=Template, dispatch_uid="clusters.handle_template_post_save") +def handle_template_post_save(sender, **kwargs): + template = kwargs["instance"] + envs = [] + for profile_file in template.profile_files.all(): + for env in profile_file.profile.environments.all(): + envs.append(env) + if envs: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + workspace.sync( + env, "signals.handle_template_post_save", pending_tasks=pending_tasks + ) + + +@receiver(post_save, sender=Profile, dispatch_uid="clusters.handle_profile_post_save") +def handle_profile_post_save(sender, **kwargs): + profile = kwargs["instance"] + envs = profile.environments.all() + if envs: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env in envs: + workspace.sync( + env, "signals.handle_profile_post_save", pending_tasks=pending_tasks + ) + + +@receiver( + post_save, sender=ProfileFile, dispatch_uid="clusters.handle_profile_file_post_save" +) +def handle_profile_file_post_save(sender, **kwargs): + profile_file = kwargs["instance"] + envs = profile_file.profile.environments.all() + if envs: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env in envs: + workspace.sync( + env, + "signals.handle_profile_file_post_save", + pending_tasks=pending_tasks, + ) + + +@receiver( + post_save, sender=Integration, dispatch_uid="clusters.handle_integration_post_save" +) +def handle_integration_post_save(sender, **kwargs): + integration = kwargs["instance"] + envs = integration.environments.all() + if envs: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for env_int in envs: + workspace.sync( + env_int.environment, + "signals.handle_integration_post_save", + pending_tasks=pending_tasks, + ) + + +@receiver( + post_save, + sender=Plan, + dispatch_uid="billing.handle_plan_post_save", +) +def handle_plan_post_save(sender, instance, *args, **kwargs): + 
environments = Environment.objects.filter( + project__account__plan=instance + ).distinct() + if environments: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + for environment in environments: + workspace.sync( + environment, + "signals.handle_plan_post_save", + pending_tasks=pending_tasks, + ) diff --git a/src/core/api/app/clusters/tasks.py b/src/core/api/app/clusters/tasks.py new file mode 100644 index 00000000..98a62fd9 --- /dev/null +++ b/src/core/api/app/clusters/tasks.py @@ -0,0 +1,435 @@ +import base64 +import logging +import os +from datetime import timedelta + +import requests +from clusters.cleanup_k8s_resources import ( + cleanup_cluster_k8s_extra_resources, + cleanup_k8s_resources, +) +from clusters.external_resources.postgres import create_read_only_user_for_service +from clusters.metrics import gen_prometheus_metrics +from clusters.models.cluster import Cluster, ClusterAlert +from credentials.models import Secret +from django.core.cache import cache +from django.utils import timezone +from gql import Client, gql +from gql.transport.exceptions import TransportQueryError +from gql.transport.requests import RequestsHTTPTransport +from kubernetes.client.exceptions import ApiException +from projects.models.environment import Environment +from rest_framework import status +from users.models import User + +import lib.kubernetes.client as k8s_client +from datacoves.celery import app + +logger = logging.getLogger(__name__) + + +@app.task +def delete_cluster_alerts_older(days_ago=14): + """ + Deletes all cluster alerts older than ago. + + Args: + days_ago (int, optional): To filter by created_at. Defaults to 14. + + Returns: + str: Summary of the number of records deleted. + """ + + some_days_ago = timezone.now() - timedelta(days=days_ago) + cluster_alert_deleted, _ = ClusterAlert.objects.filter( + created_at__lt=some_days_ago + ).delete() + return f"ClusterAlerts deleted [{cluster_alert_deleted}] < [{some_days_ago}]." + + +@app.task +def remove_k8s_resources(): + """Remove resources from kubernetes created by adapters + + Returns: + str: Summary of the resources removed. + """ + + def _get_resource_description(res) -> str: + kind = res["kind"] if isinstance(res, dict) else res.kind + name = res["metadata"]["name"] if isinstance(res, dict) else res.metadata.name + namespace = ( + res["metadata"]["namespace"] + if isinstance(res, dict) + else res.metadata.namespace + ) + description = f"{kind}/{name}" + if namespace: + description = f"{namespace}/{description}" + + return description + + k8s_res_to_delete = [] + cluster = Cluster.objects.first() + k8s_res_to_delete.extend(cleanup_cluster_k8s_extra_resources(cluster=cluster)) + + for env in Environment.objects.all(): + k8s_res_to_delete.extend(cleanup_k8s_resources(namespace=env.k8s_namespace)) + + k8s_res_to_delete.extend(cleanup_k8s_resources(namespace="core")) + k8s_res_to_delete.extend(cleanup_k8s_resources(namespace="prometheus")) + + for res in k8s_res_to_delete: + logger.info(f"Removing resource={_get_resource_description(res=res)}") + cluster.kubectl.delete(res=res) + + # Message on celery logs + if k8s_res_to_delete: + return f"Deleted {len(k8s_res_to_delete)} Kubernetes resources." + else: + return "No Kubernates resource found to be removed." 
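Note: the periodic tasks in this module (`delete_cluster_alerts_older`, `remove_k8s_resources`, `celery_heartbeat`, `prometheus_metrics`) are plain Celery tasks; this diff does not show how they are scheduled. A minimal, illustrative sketch of a beat schedule that could drive them, assuming Celery's default "<module>.<function>" task names and made-up intervals (not taken from the repo):

    # Hypothetical Celery beat wiring for the periodic cluster tasks.
    # Task names assume the default "clusters.tasks.<function>" naming;
    # the entry names and intervals below are illustrative assumptions.
    from celery.schedules import crontab

    CELERY_BEAT_SCHEDULE = {
        "delete-old-cluster-alerts": {
            "task": "clusters.tasks.delete_cluster_alerts_older",
            "schedule": crontab(hour=3, minute=0),  # once a day
            "kwargs": {"days_ago": 14},
        },
        "remove-adapter-k8s-resources": {
            "task": "clusters.tasks.remove_k8s_resources",
            "schedule": crontab(minute="*/30"),  # every 30 minutes
        },
        "celery-heartbeat": {
            "task": "clusters.tasks.celery_heartbeat",
            "schedule": 60.0,  # every minute
        },
        "prometheus-metrics": {
            "task": "clusters.tasks.prometheus_metrics",
            "schedule": 60.0,
        },
    }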
+ + +@app.task +def celery_heartbeat(): + """Celery heartbeat task to know if it's running OK""" + now = timezone.now() + Cluster.objects.update(celery_heartbeat_at=now) + return f"Celery heartbeat set at {now}" + + +@app.task +def update_grafana_datasources(): + """Task to update the tenant id of the Loki datasource + since the creation of namespaces are dynamic. + When Grafana is up and running for your organization, + we may need to make changes. + + Returns: + str: Task status + """ + + def _create_or_update_datasource(payload: dict, token: str) -> tuple: + """Grafana create or update datasource by environment""" + base_url = "http://prometheus-grafana.prometheus.svc.cluster.local/api" + headers = { + "Content-Type": "application/json; charset=utf-8", + "Authorization": f"Bearer {token}", + } + + ds_name = payload["name"] + ds_uid = payload["uid"] + + r = requests.get(url=f"{base_url}/datasources/uid/{ds_uid}", headers=headers) + if r.ok: + r = requests.put( + url=f"{base_url}/datasources/uid/{ds_uid}", + headers=headers, + json=payload, + ) + elif r.status_code == status.HTTP_404_NOT_FOUND: + r = requests.post( + url=f"{base_url}/datasources", headers=headers, json=payload + ) + + if r.ok: + message = f"Grafana datasource {ds_name}: created or updated" + else: + message = f"Grafana error creating datasource: {ds_name}: {r.text}" + + return r.ok, message + + cluster = Cluster.objects.only("id", "service_account").first() + if not cluster.is_feature_enabled("observability_stack"): + return "Obserbavility stack feature is disabled." + + token = cluster.service_account.get("grafana", {}).get("token") + if token is None: + return "Grafana service account does not exists" + + kc = cluster.kubectl + namespaces = kc.CoreV1Api.list_namespace() + namespaces = map(lambda ns: ns.metadata.name, namespaces.items) + ns_access = "|".join(namespaces) + + cache_key = f"cluster-{cluster.id}-k8s-namespaces" + ns_chached = cache.get(cache_key) + + if ns_chached is None or ns_chached != ns_access: + # LOKI + ds_name = "Datacoves Loki" + payload = { + "uid": "datacoves_loki", + "name": ds_name, + "type": "loki", + "typeName": "Loki", + "access": "proxy", + "url": "http://loki-loki-distributed-gateway", + "basicAuth": False, + "jsonData": {"httpHeaderName1": "X-Scope-OrgID", "timeout": 300}, + "secureJsonData": {"httpHeaderValue1": ns_access}, + } + + create_or_update, message = _create_or_update_datasource( + payload=payload, token=token + ) + logger.info(message) + + # PROMETHEUS-MIMIR + ds_name = "Prometheus Mimir" + payload = { + "uid": "datacoves_prometheus_mimir", + "name": ds_name, + "type": "prometheus", + "typeName": "Prometheus", + "access": "proxy", + "url": "http://mimir-nginx/prometheus", + "isDefault": False, + "basicAuth": False, + "jsonData": {"httpHeaderName1": "X-Scope-OrgID", "timeout": 300}, + "secureJsonData": {"httpHeaderValue1": ns_access}, + } + + create_or_update, message = _create_or_update_datasource( + payload=payload, token=token + ) + logger.info(message) + + if create_or_update: + cache.set(key=cache_key, value=ns_access, timeout=3600) + + # DATABASE + cache_key = f"cluster-{cluster.id}-grafana-datasource-db" + db_chached = cache.get(cache_key) + if db_chached is None: + sslmode = os.environ.get( + "DB_SSL_MODE", "disable" if cluster.is_local else "require" + ) + db_config = cluster.service_account.get("postgres_core_ro") + if db_config is None: + db_config = { + "host": os.environ["DB_HOST"], + "port": os.environ.get("DB_PORT", 5432), + "user": os.environ["DB_USER"], + 
"password": os.environ["DB_PASS"], + "database": os.environ["DB_NAME"], + } + + host = db_config["host"] + port = db_config["port"] + payload = { + "uid": "core-api-database", + "name": "Postgres Core API", + "type": "postgres", + "url": f"{host}:{port}", + "access": "proxy", + "basicAuth": True, + "user": db_config["user"], + "database": db_config["database"], + "jsonData": {"sslmode": sslmode}, + "secureJsonData": {"password": db_config["password"]}, + } + + create_or_update, message = _create_or_update_datasource( + payload=payload, token=token + ) + logger.info(message) + if create_or_update: + cache.set(key=cache_key, value=True, timeout=3600) + + +def _create_datahub_group(client, name): + """ + Creates a datahub group and assigns its role, using graphql's client + """ + query = gql( + 'mutation { createGroup(input: {id: "' + name + '", name: "' + name + '"}) }' + ) + try: + client.execute(query) + except TransportQueryError as ex: + if ex.errors[0]["message"] == "This Group already exists!": + pass + else: + raise + + query = gql( + 'mutation { batchAssignRole(input: {roleUrn: "urn:li:dataHubRole:' + + name + + '", actors: ["urn:li:corpGroup:' + + name + + '"]})}' + ) + client.execute(query) + + +@app.task(bind=True, default_retry_delay=15, max_retries=10) +def setup_db_read_only_for_service(self, env_slug: str, service_name: str): + try: + env = Environment.objects.get(slug=env_slug) + config_attr = f"{service_name.lower().replace('-', '_')}_config" + config = getattr(env, config_attr) + + # If the configuration already has a user configured, it is not configured again + if not (config and config.get("db") is not None): + raise Exception( + f"Failed to create read user on database for {env.slug}/{service_name}" + ) + + if config.get("db_read_only") is None: + db_ro_data = create_read_only_user_for_service( + env=env, service_name=service_name + ) + + if db_ro_data: + config.update({"db_read_only": db_ro_data}) + # data = {config_attr: config} + setattr(env, config_attr, config) + env.save() # We need to sync again + # Environment.objects.filter(id=env.id).update(**data) + return f"DB read-only user created for {env_slug}/{service_name}" + + except Exception as err: + logger.error("DB read-only user for %s/%s: %s", env_slug, service_name, err) + # The task is running asynchronously. + if not self.request.is_eager: + raise self.retry() + + +@app.task(bind=True, default_retry_delay=30, max_retries=30) +def setup_airflow_roles(self, env_slug: str): + from lib.airflow import AirflowAPI + + try: + env = Environment.objects.get(slug=env_slug) + airflow_api = AirflowAPI.for_environment_service_user(env=env) + role_op = airflow_api.get_role(role_name="Op") + if role_op is None: + raise Exception("Op role does not exist.") + + actions = [ + item + for item in role_op["actions"] + if item["resource"]["name"] != "Variables" + ] + + airflow_api.create_or_update_role(role_name="SysAdmin", actions=actions) + return f"Successfully updated roles for env {env_slug}" + + except Exception as err: + logger.error(err) + # The task is running asynchronously. + if not self.request.is_eager: + raise self.retry() + + +@app.task(bind=True, default_retry_delay=30, max_retries=30) +def setup_datahub_groups(self, env_slug: str): + """ + This task creates datahub groups if they do not exist and assign their + corresponding role using the graphqul api. 
+ """ + + base_url = f"http://{env_slug}-datahub-datahub-frontend.dcw-{env_slug}.svc.cluster.local:9002" + r = requests.get(f"{base_url}/health") + if not (r.ok and r.text == "GOOD"): + raise self.retry() + + kc = k8s_client.Kubectl() + auth_secret_name = "datahub-auth-secrets" + try: + deploy = kc.AppsV1Api.read_namespaced_deployment( + f"{env_slug}-datahub-datahub-frontend", f"dcw-{env_slug}" + ) + except ApiException as e: + if e.status == 404: + # Retry if deployment was not created yet + raise self.retry() + else: + raise + + status = kc.deployment_status_from_conditions(deploy.status.conditions) + if not status["available"]: + if status["progressing"]: + # Retry if deployment is not ready yet + raise self.retry() + else: + logger.error( + f"Datahub front end deployment unhealthy on environemnt {env_slug}" + ) + return + + # Datahub is available + secret = kc.read( + { + "metadata": { + "namespace": f"dcw-{env_slug}", + "name": auth_secret_name, + }, + "apiVersion": "v1", + "kind": "Secret", + } + ) + + if not secret: + logger.error( + f"Datahub auth secret ({auth_secret_name}) not found on environemnt {env_slug}" + ) + return + pswd = base64.b64decode(secret.data["system_client_secret"]) + auth_header = f"Basic __datahub_system:{pswd.decode('utf-8')}" + + # We take this opportunity to also create the secret that any datacoves service could use to ingest data + env = Environment.objects.get(slug=env_slug) + Secret.objects.update_or_create( + slug=f"{env.slug}|datahub_rest_default", + project=env.project, + defaults={ + "value_format": Secret.VALUE_FORMAT_KEY_VALUE, + "sharing_scope": Secret.SHARED_ENVIRONMENT, + "environment": env, + "services": True, + "created_by": User.objects.get( + id=env.datahub_config["service_account_user_id"] + ), + "value": { + "conn_type": "datahub-rest", + "host": f"http://{env.slug}-datahub-datahub-gms:8080", + "extra": {"extra_headers": {"Authorization": auth_header}}, + }, + }, + ) + + transport = RequestsHTTPTransport( + url=f"{base_url}/api/v2/graphql", + headers={"Authorization": auth_header}, + ) + + client = Client(transport=transport) + try: + _create_datahub_group(client, "Admin") + _create_datahub_group(client, "Editor") + _create_datahub_group(client, "Reader") + except Exception: + # We found that the datahub frontend pod sometimes returns 500 errors when not ready + raise self.retry() + + +@app.task(bind=True, default_retry_delay=60, max_retries=10) +def setup_grafana_by_env(self, env_slug: str): + try: + from clusters.observability.grafana import GrafanaApi + + env = Environment.objects.get(slug=env_slug) + grafana_api = GrafanaApi(enviroment=env) + grafana_api.create_basic_config() + + except Exception as err: + logger.error(err) + raise self.retry() + + +@app.task +def prometheus_metrics(): + gen_prometheus_metrics() + return "ok" diff --git a/src/core/api/app/clusters/tests/__init__.py b/src/core/api/app/clusters/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/clusters/tests/test_cluster.py b/src/core/api/app/clusters/tests/test_cluster.py new file mode 100644 index 00000000..a0923a3b --- /dev/null +++ b/src/core/api/app/clusters/tests/test_cluster.py @@ -0,0 +1,64 @@ +from datetime import timedelta +from unittest.mock import patch + +from clusters.models import Cluster, ClusterAlert +from clusters.tasks import delete_cluster_alerts_older +from django.test import TestCase +from django.utils import timezone +from projects.models import Release + + +class KubectlMock: + """Mock class to Kubectl 
client""" + + def get_ingress_controller_ips(self): + return "10.0.0.10", "192.168.100.10" + + def get_cluster_apiserver_ips(self): + return {} + + +class TestDeleteClusterAlertsOlder(TestCase): + @patch("lib.kubernetes.client.Kubectl") + def setUp(self, mock_k8s_client) -> None: + mock_k8s_client.return_value = KubectlMock() + + release = Release.objects.create( + name="Release dummy", + commit="123456", + released_at=timezone.now(), + ) + + cluster = Cluster.objects.create( + domain="datacoveslocal.com", kubernetes_version="1.27", release=release + ) + + for i in range(0, 100): + ClusterAlert.objects.create( + started_at=timezone.now(), + name=f"Example {i + 1}", + cluster=cluster, + status="Resolved", + data={}, + ) + + # Force specific created_at for test + created_at = self.get_datetime_days_ago(days_ago=15) + for cluster_alert in ClusterAlert.objects.all()[:50]: + cluster_alert.created_at = created_at + cluster_alert.save() + + def get_datetime_days_ago(self, days_ago): + """Return datetime by days""" + return timezone.now() - timedelta(days=days_ago) + + def test_deleted_by_two_weeks_ago(self): + """Test task to delete ClusterAlerts older by days with task""" + regs = ClusterAlert.objects.count() + self.assertEqual(100, regs) + + result = delete_cluster_alerts_older(days_ago=14) + regs = ClusterAlert.objects.count() + + self.assertIn("ClusterAlerts deleted [50]", result) + self.assertEqual(50, regs) diff --git a/src/core/api/app/clusters/tests/test_code_server_resource.py b/src/core/api/app/clusters/tests/test_code_server_resource.py new file mode 100644 index 00000000..181b6c04 --- /dev/null +++ b/src/core/api/app/clusters/tests/test_code_server_resource.py @@ -0,0 +1,114 @@ +from unittest.mock import patch + +from clusters.adapters.code_server import CodeServerAdapter +from clusters.models.cluster import Cluster +from django.test import TestCase +from dotmap import DotMap +from factories import ClusterFactory, EnvironmentFactory, ProjectFactory + +NODE_MEMORY_GIGABYTE = 16 + + +class CeleryInspectMock: + """Mock class to Celery Inspect""" + + def reserved(self): + return {} + + +class NodeListMock: + @property + def items(self): + return [ + DotMap( + { + "status": { + "capacity": { + "cpu": "10", + "memory": f"{NODE_MEMORY_GIGABYTE}Gi", + "pods": "110", + } + } + } + ) + ] + + +class KubectlMock: + def get_ingress_controller_ips(self): + return "10.0.0.10", "192.168.100.10" + + def get_cluster_apiserver_ips(self): + return {} + + def get_nodes_by_selector(self, selector): + return NodeListMock() + + +class CodeServerResourceTest(TestCase): + def setUp(self): + self.project = ProjectFactory.create() + self.services = { + "code-server": {"valid": True, "enabled": True, "unmet_preconditions": []}, + } + self.internal_services = {} + + def create_cluster(self, mock_cluster, cluster_provider): + cluster = ClusterFactory.create( + provider=cluster_provider, + code_server_config={"overprovisioning": {"enabled": False}}, + ) + + return cluster + + @patch("lib.kubernetes.client.Kubectl", return_value=KubectlMock()) + @patch("datacoves.celery.app.control.inspect", return_value=CeleryInspectMock()) + def test_code_server_resource_on_aks(self, mock_cluster, mock_celery_inspect): + env = EnvironmentFactory.create( + cluster=self.create_cluster(mock_cluster, Cluster.AKS_PROVIDER), + project=self.project, + services=self.services, + internal_services=self.internal_services, + ) + + max_pods_by_node = 8 # AKS + request_memory = int(NODE_MEMORY_GIGABYTE / max_pods_by_node * 1024) + + 
env.code_server_config = CodeServerAdapter.get_cluster_default_config( + cluster=env.cluster + ) + resources = CodeServerAdapter.gen_resources(env=env) + code_server_config = env.code_server_config + self.assertIsNotNone(resources) + self.assertEqual( + code_server_config["max_code_server_pods_per_node"], max_pods_by_node + ) + self.assertEqual( + code_server_config["resources"]["requests"]["memory"], f"{request_memory}Mi" + ) + + @patch("lib.kubernetes.client.Kubectl", return_value=KubectlMock()) + @patch("datacoves.celery.app.control.inspect", return_value=CeleryInspectMock()) + def test_code_server_resource_on_eks(self, mock_cluster, mock_celery_inspect): + env = EnvironmentFactory.create( + cluster=self.create_cluster(mock_cluster, Cluster.EKS_PROVIDER), + project=self.project, + services=self.services, + internal_services=self.internal_services, + ) + + max_pods_by_node = 16 # EKS + request_memory = int(NODE_MEMORY_GIGABYTE / max_pods_by_node * 1024) + + env.code_server_config = CodeServerAdapter.get_cluster_default_config( + cluster=env.cluster + ) + resources = CodeServerAdapter.gen_resources(env=env) + code_server_config = env.code_server_config + self.assertIsNotNone(resources) + self.assertEqual( + code_server_config["max_code_server_pods_per_node"], max_pods_by_node + ) + self.assertEqual( + code_server_config["resources"]["requests"]["memory"], f"{request_memory}Mi" + ) diff --git a/src/core/api/app/clusters/views.py b/src/core/api/app/clusters/views.py new file mode 100644 index 00000000..c41c8e0d --- /dev/null +++ b/src/core/api/app/clusters/views.py @@ -0,0 +1,170 @@ +import logging +from datetime import datetime + +from django.http import Http404, JsonResponse +from django.utils.timezone import make_aware +from projects.models import Environment +from rest_framework import status, views +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response +from rest_framework.status import HTTP_200_OK + +from lib.tools import get_related_environment + +from .builder import WorkbenchBuilder +from .models import Cluster, ClusterAlert + +logger = logging.getLogger(__name__) + + +def healthcheck(request): + return JsonResponse({"status": "ok"}) + + +class WorkbenchStatus(views.APIView): + permission_classes = [IsAuthenticated] + + def get(self, request, environment_slug): + try: + status = ( + WorkbenchBuilder(request.user, environment_slug) + .check_permissions() + .status.check_status() + .build() + ) + + if status is None: + return Response(status=HTTP_200_OK) + + return Response(status, status=HTTP_200_OK) + + except Environment.DoesNotExist: + raise Http404() + + +class WorkbenchHeartbeat(views.APIView): + permission_classes = [IsAuthenticated] + + def get(self, request, environment_slug): + try: + WorkbenchBuilder( + request.user, environment_slug + ).check_permissions().heartbeat().build() + return Response(status=HTTP_200_OK) + + except Environment.DoesNotExist: + raise Http404() + + +class WorkbenchCodeServerRestart(views.APIView): + permission_classes = [IsAuthenticated] + + def post(self, request, environment_slug): + try: + WorkbenchBuilder( + request.user, environment_slug + ).check_permissions().code_server.restart().build() + return Response(status=HTTP_200_OK) + + except Environment.DoesNotExist: + raise Http404() + + +class WorkbenchCodeServerStartLocalAirflow(views.APIView): + permission_classes = [IsAuthenticated] + + def has_permission(self, request, view): + """Add in check for user being one of the sys admins of the project 
+ or environment""" + user = request.user + env_slug = request.environment_slug + env = WorkbenchBuilder(request.user, env_slug).set_environment().environment + + return ( + super().has_permission(request, view) + and len(user.service_resource_permissions("airflow:admin", env)) > 0 + ) + + def post(self, request, environment_slug): + """Enable and start local airflow""" + try: + WorkbenchBuilder( + request.user, environment_slug + ).check_permissions().code_server.enable_local_airflow().build() + return Response(status=HTTP_200_OK) + + except Environment.DoesNotExist: + raise Http404() + + +class WorkbenchCodeServerSettings(views.APIView): + permission_classes = [IsAuthenticated] + + def post(self, request, environment_slug): + try: + WorkbenchBuilder( + request.user, environment_slug + ).check_permissions().code_server.update_settings(request.data).build() + return Response(status=HTTP_200_OK) + + except Environment.DoesNotExist: + raise Http404() + + def get(self, request, environment_slug): + try: + ue = ( + WorkbenchBuilder(request.user, environment_slug) + .check_permissions() + .code_server.build() + ) + return Response(ue.code_server_config, status=HTTP_200_OK) + + except Environment.DoesNotExist: + raise Http404() + + +class AlertView(views.APIView): + def post(self, request, *args, **kwargs): + cluster = Cluster.objects.current().first() + alerts = request.data.pop("alerts") + for alert_payload in alerts: + self.process_alert(alert_payload, cluster) + return Response(data={}, status=status.HTTP_200_OK) + + def process_alert(self, alert_payload, cluster): + status = alert_payload.get("status") + name = alert_payload.get("labels", {}).get("alertname", None) + alert_type = alert_payload.get("labels", {}).get("alert_type", None) + date = alert_payload["startsAt"] + if alert_type == "log": + # Loki Logs alerts have a different format than others. 
+ # We need to truncate milliseconds to 6 digits + date = date[:-4] + "Z" + resolved = status == "resolved" + started_at = make_aware(datetime.strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ")) + namespace = alert_payload.get("labels", {}).get("namespace", None) + environment = get_related_environment(namespace) if namespace else None + + previous_alerts = ClusterAlert.objects.filter( + name=name, + namespace=namespace, + cluster=cluster, + environment=environment, + resolved=False, + started_at=started_at, + ) + exists = previous_alerts.exists() + if not resolved and not exists: + alert = ClusterAlert.objects.create( + data=alert_payload, + cluster=cluster, + name=name, + namespace=namespace, + environment=environment, + status=status, + resolved=resolved, + started_at=started_at, + ) + alert.generate_notifications() + elif exists: + previous_alerts.update(resolved=True) diff --git a/src/core/api/app/clusters/workspace.py b/src/core/api/app/clusters/workspace.py new file mode 100644 index 00000000..42623b5b --- /dev/null +++ b/src/core/api/app/clusters/workspace.py @@ -0,0 +1,1223 @@ +import logging +import re +import threading +from copy import deepcopy +from http import HTTPStatus +from typing import Optional + +from clusters.tasks import setup_grafana_by_env +from django.conf import settings +from django.core.cache import cache +from django.db import transaction +from django.db.models import Prefetch +from django.utils import timezone +from kubernetes.client.exceptions import ApiException +from projects.models import ( + Environment, + ProfileFile, + Project, + Release, + UserCredential, + UserEnvironment, + UserRepository, +) +from tenacity import retry, stop_after_attempt, wait_fixed +from users.models import Account, User + +from datacoves.celery import app +from lib.dicts import deep_merge +from lib.kubernetes import make +from lib.utils import LOCK_EXPIRE, get_pending_tasks, task_lock + +from .adapters import EnvironmentAdapter +from .adapters.all import ADAPTERS, EXTERNAL_ADAPTERS, INTERNAL_ADAPTERS +from .adapters.code_server import CodeServerAdapter + +CRD_GROUP = "datacoves.com" +CRD_VERSION = "v1" +CRD_API_VERSION = f"{CRD_GROUP}/{CRD_VERSION}" +POMERIUM_MEM_REQ_PER_USER = 2 # Megabytes +CELERY_HEARTBEAT_TIMEOUT = 6 # minutes + +logger = logging.getLogger(__name__) + + +def sync(env: Environment, reason, run_async=True, pending_tasks=None, *args, **kwargs): + """ + Decides if calls sync synchronously or asynchronously, useful when using ipdb to debug this + The only place where this is called synchronously is Django admin, use that for debugging + """ + + if not env.sync: + logger.info("Sync not enabled for environment %s", env.slug) + return + + # Workaround to force tasks to be synchronous in integration test + if settings.RUN_TASKS_SYNCHRONOUSLY: + run_async = False + + # Call sync_task immediately, or on commit if in a transaction. 
+ logger.info( + "Synchronizing workspace for environment %s reason=%s kwargs=%s", + env.slug, + reason, + kwargs, + ) + if run_async: + if pending_tasks is None: + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + + already_pending_task = False + for task in pending_tasks: + environment_id, reason = task.get("args") + if environment_id == env.id: + already_pending_task = True + break + if not already_pending_task: + transaction.on_commit( + lambda: sync_task.delay(env.id, reason, *args, **kwargs) + ) + else: + cache.set( + f"workspace_sync_need_interruption_{env.id}", + True, + timeout=None, + ) + logger.info( + "Sync task not executed since there is already one enqueued; requested interruption" + ) + else: + transaction.on_commit(lambda: sync_task(env.id, reason)) + + +@retry( + stop=stop_after_attempt(3), + wait=wait_fixed(5), + reraise=False, +) +def sync_user_environment(ue: UserEnvironment): + """ + If user code-server statefulset exists, we update replicas and restartedAt right away, + the heavier sync process (+ operator) will perform the full update later + """ + + try: + if not ue.environment.has_code_server: + return + + seconds_elapsed = (timezone.now() - ue.created_at).total_seconds() + if seconds_elapsed < 60 * 2: + # There are still some components that have not been created + logger.info( + "Code server for user %s was just created, not sync it", + str(ue), + ) + return + + logger.info("Synchronizing the user environment %s", str(ue)) + kc = ue.env.cluster.kubectl + body = { + "spec": { + "replicas": 1 if ue.is_code_server_enabled else 0, + "template": { + "metadata": { + "annotations": { + "kubectl.kubernetes.io/restartedAt": ue.code_server_restarted_at + } + } + }, + } + } + + kc.AppsV1Api.patch_namespaced_stateful_set( + f"code-server-{ue.user.slug}", + f"dcw-{ue.environment.slug}", + body, + pretty="true", + ) + except ApiException as e: + if e.status == HTTPStatus.NOT_FOUND: + # We catch and ignore 404 as deployment was not created yet by the operator + logger.info("The code server for user %s has not yet been created", str(ue)) + + else: + logger.error( + "Error while synchronizing user environment %s: %s", + str(ue), + str(e), + ) + + raise e + + except Exception as e: + logger.error( + "Error while synchronizing user environment %s: %s", + str(ue), + str(e), + ) + + raise e + + +@app.task +def sync_task(env_id, reason, *args, **kwargs): + lock_id = f"environment-lock-{env_id}" + cache_key = f"workspace_sync_need_interruption_{env_id}" + + with task_lock(lock_id) as acquired: + if acquired: + sync_task = SyncTask(env_id, reason) + attempts = 0 + + # We'll keep trying until we finish or throw an exception. + # If the cache key workspace_sync_need_interruption_ID is set + # to True, we will abort the run job early. + while not sync_task.run(): + logger.info( + "Workspace sync interruption received for environment %s -- re-running it attempt=%s", + env_id, + attempts, + ) + + if attempts > 10: + raise RuntimeError( + "We've tried 10 times to resync, this means something " + "is probably broken and needs to be looked at." 
+ )
+
+ # Clear the interruption since we're about to re-run
+ cache.delete(cache_key)
+
+ # Update lock
+ cache.touch(lock_id, LOCK_EXPIRE)
+ sync_task = SyncTask(env_id, reason)
+
+ # Keep track of how many times we've tried
+ attempts += 1
+
+ # Make sure to unset the needs interruption cache variable
+ cache.delete(cache_key)
+ return f"Environment {env_id} synchronized"
+
+ else:
+ cache.set(cache_key, True, timeout=None)
+ logger.info(
+ "Workspace sync could not acquire lock; requested interruption for environment %s",
+ env_id,
+ )
+ return (
+ f"Environment {env_id} is already syncing - signaled for it to abort."
+ )
+
+
+class SyncTask:
+ """A class to handle the sync task and to allow caching amongst the
+ different steps.
+
+ NOTE: Rather than doing multiple updates to the Environment object
+ throughout the course of execution, we accumulate updates in
+ self.env_updates and commit them at the end of the 'run' call.
+
+ Please be mindful of how this works if you alter this class.
+ """
+
+ def __init__(self, env_id: int, reason):
+ """Takes the environment ID to sync and loads common data items"""
+
+ # To grab profile files
+ prefetch_profile_files_from_file = Prefetch(
+ "profile__files_from__files",
+ queryset=ProfileFile.objects.select_related("template"),
+ )
+
+ prefetch_profile_files = Prefetch(
+ "profile__files", queryset=ProfileFile.objects.select_related("template")
+ )
+
+ # This monster will cache everything we need for the sync.
+ # It is key to avoiding queries, and often queries in loops.
+ self.env = (
+ Environment.objects.filter(id=env_id)
+ .select_related(
+ "cluster",
+ "profile",
+ "profile__files_from",
+ "project",
+ "project__repository",
+ "project__account",
+ "project__deploy_key",
+ "release",
+ )
+ .prefetch_related(prefetch_profile_files_from_file, prefetch_profile_files)
+ .first()
+ )
+
+ # We use this extensively as well, so let's go ahead and query
+ # it.
+ self.ue_list = UserEnvironment.objects.filter(
+ environment=self.env
+ ).select_related(
+ "user",
+ "environment",
+ "environment__project",
+ "environment__project__repository",
+ )
+
+ # Map user IDs to user environments. This is used in a few places.
+ self.ue_list_by_user = {ue.user_id: ue for ue in self.ue_list}
+
+ # This doesn't seem to be used anywhere yet, but we have it if we
+ # need it.
+ self.reason = reason
+
+ # What updates do we want to make to 'env'? Let's collect all
+ # the updates in one dict, so that we can apply them with one set,
+ # instead of 4+
+ self.env_updates = {}
+
+ def run(self) -> bool:
+ """This runs the common sync steps.
+
+ Returns True if the process completes in some satisfactory fashion,
+ or False if it needs to be re-run from the start because we got an
+ interruption request.
+
+ The interruption request happens when the cache key
+ workspace_sync_need_interruption_ENVID is set.
+ """
+
+ # If env didn't load, or doesn't have the sync bit set, let's
+ # skip.
+ if not self.env or not self.env.sync:
+ return True
+
+ cache_key = f"workspace_sync_need_interruption_{self.env.id}"
+
+ if cache.get(cache_key, False):
+ return False
+
+ # TODO: Separate set_default_configs from sync.
+ # SDC: This is the original comment, not my comment; I'm not
+ # sure why we need to separate this.
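+ # Each step below is followed by an interruption check: if another sync
+ # request comes in while this one is running, we return False early so the
+ # caller can restart the whole run with fresh data.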
+ self.set_default_configs() + + if cache.get(cache_key, False): + return False + + self.sync_external_resources() + + if cache.get(cache_key, False): + return False + + self.validate_preconditions() + + if cache.get(cache_key, False): + return False + + try: + self.sync() + except ApiException as e: + if e.status == 409: # Conflict -- let's retry + logger.info("Got a conflict from self.sync -- will retry") + logger.info(e) + return False + + # Otherwise, rethrow, it's a new exception. + raise + + if cache.get(cache_key, False): + return False + + self.on_post_enabled() + + if cache.get(cache_key, False): + return False + + # The previous calls may alter self.env but do not save it in order + # to avoid extra update queries. This will commit whatever we've + # got to the database. + # + # There's a potential problem with infinite loops in the save + # signal, so we do it like this to bypass the signal. + if self.env_updates: + Environment.objects.filter(id=self.env.id).update(**self.env_updates) + + # Return based on the cache key, to avoid a race condition where + # potentially we have started a new sync whilst committing to the + # Environments table ( ... really unlikely, but let's be sure) + return not cache.get(cache_key, False) + + def set_default_configs(self): + """Adds default configuration when missing. + This alters self.env but does not save it, so the caller is + responsible for calling env.save() when done updating it.""" + + def _get_adapter_default_config(adapters, new_configs): + for Adapter in adapters.values(): + if Adapter.is_enabled(self.env): + new_config = Adapter.get_default_config(self.env) + existing_config = getattr(self.env, Adapter.config_attr()) + if new_config != existing_config: + new_configs[Adapter.config_attr()] = new_config + setattr(self.env, Adapter.config_attr(), new_config) + + new_configs = {} + + # Processing external adapters first, so we can enable internal + # adapters + _get_adapter_default_config(EXTERNAL_ADAPTERS, new_configs) + + # Enabling internal adapters + for service_name, Adapter in INTERNAL_ADAPTERS.items(): + configs = _get_adapters_internal_services_config(self.env, service_name) + Adapter.enable_service(self.env, extra_config=configs) + + # Processing internal adapters default configs + _get_adapter_default_config(INTERNAL_ADAPTERS, new_configs) + + # Doing it this way keeps our in-memory env object in sync with + # the database. + for key, val in new_configs.items(): + self.env_updates[key] = val + + def sync_external_resources(self): + """Creates/updates external resources such as dbs, s3 buckets, etc.""" + + for Adapter in ADAPTERS.values(): + if Adapter.is_enabled(self.env): + Adapter.sync_external_resources(self.env) + + def validate_preconditions(self): + """Checks to make sure all preconditions are met, updating the + environment if necessary. 
This makes changes to the env_updates + dictionary instead of doing a direct query, so that must be + committed by the caller.""" + + unmet_preconditions = {} + + for Adapter in ADAPTERS.values(): + if Adapter.is_enabled(self.env): + adapter_unmets = Adapter.get_unmet_preconditions(self.env) + if adapter_unmets: + unmet_preconditions[Adapter.service_name] = adapter_unmets + + self.validate_user_preconditions(adapter=Adapter) + + services = deepcopy(self.env.services) + + for service, options in services.items(): + if options["enabled"]: + services[service]["valid"] = True + services[service]["unmet_preconditions"] = [] + + for service_key, unmets in unmet_preconditions.items(): + if unmets: + services[service_key]["valid"] = False + services[service_key]["unmet_preconditions"] = unmets + + # Doing it this way keeps our in-memory env object in sync with + # the database. + if services != self.env.services: + self.env_updates["services"] = services + self.env.services = services + + def validate_user_preconditions(self, adapter: EnvironmentAdapter): # noqa: C901 + """Validates user level preconditions only when the service is enabled. + Updates the user environment as needed. This makes changes to the + env_updates dictionary instead of doing a direct query, so that must be + committed by the caller. + + The linter complains about this being complex, but the complexity + is necessary to avoid looped queries. It doesn't make much sense + to break this up further as it is far more readable in one place. + """ + + if adapter.service_name not in settings.USER_SERVICES: + return + + bulk_unmets = adapter.get_user_unmet_preconditions_bulk(ue_list=self.ue_list) + + service_names = [adapter.service_name] + adapter.linked_service_names + for ue in self.ue_list: + unmet_preconditions = {} + adapter_unmets = bulk_unmets[ue.id] + + if adapter_unmets: + unmet_preconditions[adapter.service_name] = adapter_unmets + + for linked_service_name in adapter.linked_service_names: + unmet_preconditions[linked_service_name] = adapter_unmets + + else: + for linked_service_name in adapter.linked_service_names: + adapter_unmets = ( + adapter.get_user_linked_services_unmet_preconditions( + linked_service_name, ue + ) + ) + + if adapter_unmets: + unmet_preconditions[linked_service_name] = adapter_unmets + + services = ue.services.copy() if ue.services else {} + + for service in service_names: + unmet = unmet_preconditions.get(service) + if unmet: + services.update( + {service: {"valid": False, "unmet_preconditions": unmet}} + ) + else: + services.update( + {service: {"valid": True, "unmet_preconditions": []}} + ) + + # Only update UserEnvironment if it changed to avoid extra + # queries in a loop. + if ue.services != services: + # Avoid signal call + UserEnvironment.objects.filter(id=ue.id).update(services=services) + + def sync(self, log=None): + """Compares the environment's desired workspace with the current workspace + and creates or updates kubernetes resources as needed.""" + + # TODO: Account should have its own separate sync tied to the Account model. + # That requires some schema changes, and making decisions like if accounts + # can use more than one cluster or if they must have their own release fk. + # For now, we'll create the account from the workspace data here. + # + # SDC: This is the original comment, not mine. 
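+ # Rough flow: sync the account-level resources first, then regenerate the
+ # workspace resources, compare them against the last applied versions, and
+ # only hit the kubernetes API for the pieces that actually changed.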
+ self.sync_account(log) + + global last_sync_res, last_sync_workspace + + # This will save us from querying in a loop when generating user + # credentials + prefetch_creds = Prefetch( + "credentials", + queryset=UserCredential.objects.select_related( + "ssl_key", "connection_template", "connection_template__type" + ), + ) + + # This will prevent us from querying SSH keys in a loop + prefetch_repo_ssh = Prefetch( + "repositories", + queryset=UserRepository.objects.select_related("ssh_key", "repository"), + ) + + # This is used in many subsequent calls. + env_users = list( + self.env.users.exclude( + id__in=self.env.project.account.developers_without_license + ) + .prefetch_related(prefetch_creds, prefetch_repo_ssh) + .select_related("auth_token") + ) + + res, config_hashes, users_secret_names = gen_workspace_resources( + self.env, env_users, self.ue_list_by_user + ) + workspace = gen_workspace( + self.env, env_users, config_hashes, users_secret_names, self.ue_list_by_user + ) + + with last_sync_lock: + res_changed = last_sync_res != res + workspace_changed = last_sync_workspace != workspace + + if not res_changed and not workspace_changed: + return + + kc = self.env.cluster.kubectl + + if res_changed: + kc.apply_resources(self.env.k8s_namespace, res, log=log) + cache.delete(f"deployment-status:{self.env.k8s_namespace}") + + if workspace_changed: + _, _, workspace_res = kc.apply(workspace) + + with last_sync_lock: + last_sync_res = res + last_sync_workspace = workspace + + if workspace_changed: + generation = workspace_res.get("metadata", {}).get("generation") + # We set attributes with update() to avoid sending post_save signals + # which would cause an infinite loop because the signal calls this function. + self.env.workspace_generation = generation + self.env_updates["workspace_generation"] = generation + + def on_post_enabled(self): + """Calls each adapter when the service becomes enabled""" + clear_pomerium_cache = False + setup_grafana_resources = False + for service_name, adapter in ADAPTERS.items(): + service_cache_key = f"{self.env.slug}-{service_name}-enabled" + on_post_cache = cache.get(service_cache_key) + + if on_post_cache and adapter.is_enabled(self.env): + clear_pomerium_cache = True + setup_grafana_resources = True + + new_config = adapter.on_post_enabled(env=self.env) + if new_config: + config = getattr(self.env, adapter.config_attr()) + config.update(new_config) + self.env_updates[adapter.config_attr()] = config + + cache.delete(service_cache_key) + + if clear_pomerium_cache: + # We restart pomerium's redis every time new service(s) got enabled so we force + # a cache clear. 
Otherwise, pomerium could try to reuse a token that the service has already expired
+ self.env.cluster.kubectl.restart_deployment(
+ "pomerium-redis", self.env.k8s_namespace
+ )
+
+ if setup_grafana_resources and self.env.is_internal_service_enabled(
+ settings.INTERNAL_SERVICE_GRAFANA
+ ):
+ # Check if there are resources for Grafana for each environment
+ setup_grafana_by_env.apply_async((self.env.slug,), countdown=60)
+
+ def sync_account(self, log=None):
+ global last_sync_account_res, last_sync_account_obj
+
+ account = self.env.project.account
+
+ res, config_hashes = gen_account_resources(account, self.env)
+ k8s_account = gen_account(account, self.env, config_hashes)
+
+ with last_sync_account_lock:
+ res_changed = last_sync_account_res != res
+ account_changed = last_sync_account_obj != k8s_account
+
+ if not res_changed and not account_changed:
+ return
+
+ kc = self.env.cluster.kubectl
+
+ if res_changed:
+ kc.apply_resources(account_namespace(account), res, log=log)
+
+ if account_changed:
+ _, _, account_res = kc.apply(k8s_account)
+
+ with last_sync_account_lock:
+ last_sync_account_res = res
+ last_sync_account_obj = k8s_account
+
+
+# Optimization: We remember the last generated resources to avoid calling the
+# k8s api if nothing has changed.
+last_sync_lock = threading.Lock()
+last_sync_res = None
+last_sync_workspace = None
+
+
+def delete(env: Environment):
+ """Deletes the kubernetes namespace associated with the environment/workspace."""
+ kc = env.cluster.kubectl
+ kc.delete_namespace(env.k8s_namespace)
+
+
+last_sync_account_lock = threading.Lock()
+last_sync_account_res = None
+last_sync_account_obj = None
+
+
+def _not_found_response(service_name) -> dict:
+ return {
+ "status": "error",
+ "services": {service_name: "not_found"},
+ "updated_at": timezone.now(),
+ }
+
+
+def get_workloads_status(env: Environment) -> Optional[dict]:
+ return cache.get(f"workloads-status:{env.k8s_namespace}")
+
+
+def user_workloads_status(ue: UserEnvironment) -> dict:
+ last_update_time = timezone.now()
+ user_deployments = user_deployment_names(ue=ue)
+ if not user_deployments:
+ return {
+ "status": "running",
+ "services": {},
+ "updated_at": last_update_time,
+ }
+
+ workloads_status = get_workloads_status(ue.environment)
+ if workloads_status is None:
+ return _not_found_response("cache")
+
+ pomerium = workloads_status.get("pomerium")
+ if not pomerium or not pomerium.get("available", False):
+ return _not_found_response("pomerium")
+
+ services = {}
+ containers = {}
+ overall_status = "running"
+
+ for service, workload_names in user_deployments.items():
+ # We can check more than one workload (deploy, statefulset) for a service
+ for workload_name in workload_names.split(","):
+ workload = workloads_status.get(workload_name)
+ if not workload:
+ workload = {}
+ service_status = "not_found"
+ if overall_status == "running":
+ overall_status = "in_progress"
+
+ available = workload.get("available", False)
+ progressing = workload.get("progressing", True)
+ condition = workload.get("condition")
+ ready_replicas = workload.get("ready_replicas")
+ containers_statuses = workload.get("containers", [])
+
+ last_update_time = (
+ condition.last_update_time
+ if condition and hasattr(condition, "last_update_time")
+ else last_update_time
+ )
+ service_status = "running"
+
+ # An inactive code-server might be available but with 0 replicas, which means it hasn't started yet
+ if (not available and progressing) or (available and not ready_replicas):
+ service_status = "in_progress"
+ if overall_status == 
"running": + overall_status = "in_progress" + elif not available and not progressing: + service_status = "error" + overall_status = "error" + + services[service] = service_status + containers[service] = containers_statuses + + return { + "status": overall_status, + "services": services, + "updated_at": last_update_time, + "containers": containers, + } + + +def user_deployment_name_for_services() -> dict: + deployment_names = {} + for service_name, Adapter in EXTERNAL_ADAPTERS.items(): + deployment_names[service_name] = Adapter.deployment_name + return deployment_names + + +def user_deployment_names(ue: UserEnvironment) -> dict: + """ + Gets the user k8s deployment names to check, taking into account + both environment services and user environment services + """ + names_for_services = user_deployment_name_for_services() + deployment_names = {} + # we want to discard user services + enabled_services = ue.env.enabled_and_valid_services() - set(settings.USER_SERVICES) + if ue: + enabled_services |= ue.enabled_and_valid_services() + + can_access_services = { + p["service"] for p in gen_workspace_user_permission_names(ue) + } + services = enabled_services & can_access_services + + # we don't need to check local dbt docs is up and running + if settings.SERVICE_LOCAL_DBT_DOCS in services: + services.remove(settings.SERVICE_LOCAL_DBT_DOCS) + + for service in services: + deployment_names[service] = names_for_services[service].format( + user_slug=ue.user.slug, env_slug=ue.env.slug + ) + + return deployment_names + + +# Resource generation +def gen_workspace_resources(env: Environment, env_users, ue_list_by_user=None): + """This is used by callers outside this module, so it isn't brought into + our class. That said, it can benefit from some caching, so we'll + allow our class to pass in ue_list_by_user which is a dictionary mapping + user.id to UserEnvironment objects. If it is None, we'll use the + old behavior + """ + + namespace = env.k8s_namespace + + ns = gen_namespace(namespace) + + ns["metadata"]["labels"] = { + "k8s.datacoves.com/workspace": env.slug, # Required for network policies. + "k8s.datacoves.com/project": env.project.slug, + "k8s.datacoves.com/account": env.project.account.slug, + "k8s.datacoves.com/environment-type": env.type, + "k8s.datacoves.com/release": env.release.name, + } + res = [ns] + + if env.cluster.is_feature_enabled("block_workers"): + res.append(make.admission_webhook(env.slug, namespace)) + + quota_spec = env.get_quota() + + if quota_spec: + quota = make.namespace_quota(namespace=namespace, spec=env.get_quota()) + res.append(quota) + limit_range = make.namespace_limit_range(namespace=namespace) + res.append(limit_range) + + res.extend(gen_base_config(env)) + for Adapter in EXTERNAL_ADAPTERS.values(): + if Adapter.is_enabled_and_valid(env): + res.extend(Adapter.gen_resources(env)) + + for service_name, Adapter in INTERNAL_ADAPTERS.items(): + configs = _get_adapters_internal_services_config(env, service_name) + res.extend(Adapter.gen_resources(env, extra_config=configs)) + + users_secret_names = {} + + # If we don't have this already as a cached item, let's build the + # cache here. 
+ if ue_list_by_user is None: + ue_list_by_user = { + ue.user_id: ue + for ue in UserEnvironment.objects.only( + "services", "variables", "user_id" + ).filter(user__in=env_users, environment=env) + } + + for user in env_users: + ue = ue_list_by_user.get(user.id) + + if ue and ue.is_service_valid(settings.SERVICE_CODE_SERVER): + config = CodeServerAdapter.gen_user_secrets(env, user, ue) + users_secret_names[user.slug] = config["metadata"]["name"] + res.append(config) + + return res, make.res_config_hashes(res), users_secret_names + + +def _get_adapters_internal_services_config(env: Environment, name: str): + configs = [] + for Adapter in EXTERNAL_ADAPTERS.values(): + if Adapter.is_enabled(env): + config = Adapter.get_internal_service_config(env, name) + if config: + configs.append(config) + return configs + + +def gen_workspace( + env: Environment, + env_users, + config_hashes: dict, + users_secret_names: dict, + ue_list_by_user: dict = None, +): + """Returns an environment's desired workspace spec. This is used outside + this module, so we are not making it part of our SyncTask class. + + ue_list_by_user is a dictionary that maps user IDs to user environments. + It is used for caching purposes and can be none if you do not have it. + """ + + ns = env.k8s_namespace + name = workspace_name(env) + project: Project = env.project + account: Account = project.account + release: Release = env.release + + annotations = { + "datacoves.com/release": release.name, + } + cluster = env.cluster + + workspace_spec = { + "account": account_name(account), + "project": project_name(project), + "accountSuspended": str(account.is_suspended(cluster)).lower(), + } + + cluster_config = { + "clusterDomain": cluster.domain, + "certManagerIssuer": cluster.cert_manager_issuer, + "externalDnsUrl": cluster.external_dns_url, + "internalDnsIp": cluster.internal_dns_ip, + "internalIp": cluster.internal_ip, + "externalIp": cluster.external_ip, + "clusterApiServerIps": cluster.api_server_ips, + "internalDbClusterIpRange": cluster.internal_db_cluster_ip_range, + "resourceRequirements": gen_resource_requirements(env, env_users), + "oidcUserId": settings.IDP_OIDC_USER_ID, + "nodeLocalDnsEnabled": str( + cluster.is_feature_enabled("node_local_dns_enabled") + ).lower(), + } + workspace_spec.update(cluster_config) + + images = gen_workspace_images( + release, env.profile.image_set, cluster.docker_registry + ) + user_images = gen_workspace_user_images(images) + images_config = { + "imageRegistry": env.docker_registry, + "imagePullSecret": env.docker_config_secret_name, + "images": images, + "releaseProfile": env.release_profile, + } + workspace_spec.update(images_config) + + dbt_config = { + "dbtHome": env.dbt_home_path, + } + workspace_spec.update(dbt_config) + + code_server_config = { + "dontUseWsgi": str(env.cluster.dont_use_uwsgi).lower(), + "cloneRepository": str(env.profile.clone_repository).lower(), + "localDbtDocsDisabled": str(not env.profile.dbt_local_docs).lower(), + "dbtSyncServerDisabled": str(not env.profile.dbt_sync).lower(), + } + workspace_spec.update(code_server_config) + + git_repo_config = { + "sshGitRepo": project.repository.git_url, + "httpGitRepo": project.repository.url, + "gitCloneStrategy": project.clone_strategy, + } + workspace_spec.update(git_repo_config) + + # TODO: These may be better in a separate adapter / configmap. 
+ dbt_docs_config = { + "dbtDocsGitBranch": env.dbt_docs_config.get("git_branch"), + "dbtDocsAskpassUrl": env.dbt_docs_config.get("askpass_url", ""), + } + + workspace_spec.update(dbt_docs_config) + + workspace_spec["services"] = gen_workspace_spec_services(env) + workspace_spec["internalServices"] = gen_workspace_spec_internal_services(env) + + workspace_spec["charts"] = gen_workspace_spec_charts(env.release) + + workspace_spec["users"] = gen_workspace_spec_users( + env, env_users, users_secret_names, user_images, ue_list_by_user + ) + + workspace_spec["configs"] = config_hashes + + return { + "apiVersion": CRD_API_VERSION, + "kind": "Workspace", + "metadata": {"namespace": ns, "name": name, "annotations": annotations}, + "spec": workspace_spec, + } + + +def gen_workspace_images(release, image_set, registry: str): + images = {} + images.update(release.images) + + # Append imageset images + if image_set and image_set.images: + images.update(image_set.images_without_registry(registry)) + # Append core images + for image in release.core_images: + name, tag = image.split(":") + images[name] = tag + return images + + +def gen_workspace_user_images(workspace_images): + """Filter workspace images to those used by User pods.""" + return { + k: v + for k, v in workspace_images.items() + if "code-server" in k or "local-dbt-docs" in k or "dbt-core-interface" in k + } + + +# Used externally +def gen_account_resources(account: Account, env: Environment): + namespace = account_namespace(account) + ns = gen_namespace(namespace) + ns["metadata"]["labels"] = { + "k8s.datacoves.com/account": account.slug, # Required for network policies. + } + + res = [ns] + + if env.docker_config: + res.append(gen_docker_config_secret(env)) + + return res, make.res_config_hashes(res) + + +def gen_account(account: Account, env: Environment, config_hashes): + ns = account_namespace(account) + name = account_name(account) + release = env.release + + annotations = { + "datacoves.com/release": release.name, + } + + account_spec = {} + + images_config = { + "imageRegistry": env.docker_registry, + "imagePullSecret": env.docker_config_secret_name, + "images": release.images, + } + account_spec.update(images_config) + + account_spec["configs"] = config_hashes + + return { + "apiVersion": CRD_API_VERSION, + "kind": "Account", + "metadata": {"namespace": ns, "name": name, "annotations": annotations}, + "spec": account_spec, + } + + +def account_name(account: Account) -> str: + return account.slug + + +def account_namespace(account: Account) -> str: + return f"dca-{account.slug}" + + +def project_name(project: Project) -> str: + return project.slug + + +def workspace_name(env: Environment) -> str: + return env.slug + + +def gen_workspace_spec_services(env): + services = { + # Deprecated + "AirflowLogs": booldict_to_strdict({"enabled": False, "valid": False}) + } + for name, options in env.services.items(): + services[name] = booldict_to_strdict( + { + "enabled": options.get("enabled", isinstance(env, UserEnvironment)), + "valid": options.get("valid", False), + } + ) + + return services + + +# Merge with gen_workspace_spec_services? 
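+# Both spec helpers emit string booleans for the CRD; for example (illustrative
+# values), {"enabled": True, "valid": False} becomes
+# {"enabled": "true", "valid": "false"} via booldict_to_strdict below.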
+def gen_workspace_spec_internal_services(env: Environment): + services = {} + for name, options in env.internal_services.items(): + services[name] = booldict_to_strdict(options) + return services + + +def gen_workspace_spec_charts(release: Release): + return { + "airbyte": release.airbyte_chart, + "airflow": release.airflow_chart, + "superset": release.superset_chart, + "minio": release.minio_chart, + "promtail": release.promtail_chart, + "elastic": release.elastic_chart, + "neo4j": release.neo4j_chart, + "postgresql": release.postgresql_chart, + "kafka": release.kafka_chart, + "datahub": release.datahub_chart, + } + + +def booldict_to_strdict(bd): + sd = {} + for k, v in bd.items(): + assert isinstance(v, bool) + sd[k] = str(v).lower() + return sd + + +def strdict_to_booldict(sd): + bd = {} + for k, v in sd.items(): + assert v in ("true", "false") + bd[k] = v == "true" + return bd + + +def gen_workspace_spec_users( + env: Environment, + env_users, + users_secret_names: dict, + user_images, + ue_list_by_user: dict = None, +): + """ue_list_by_user is a dictionary that maps user IDs to user environments. + It is used for caching purposes and can be none if you do not have it. + """ + + users = [] + + # This is going to iterate over all these users and query permissions + # for each one. Let's pre-cache the permissions so we don't have to + # do that so much. + user_env_perms = User.get_bulk_environment_permission_names(env_users, env) + + for user in env_users: + if ue_list_by_user is None or user.id not in ue_list_by_user: + ue, _ = UserEnvironment.objects.get_or_create(environment=env, user=user) + + if ue_list_by_user is not None: + ue_list_by_user[user.id] = ue + + else: + ue = ue_list_by_user[user.id] + + code_server_shareable = env.cluster.is_feature_enabled("shareable_codeserver") + code_server_exposures = env.cluster.is_feature_enabled("codeserver_exposures") + secret_name = users_secret_names.get(user.slug) + is_code_server_enabled = ue.is_code_server_enabled and secret_name is not None + + users.append( + { + "email": user.email, + "slug": user.slug, + "name": user.name, + "permissions": gen_workspace_user_permission_names(ue, user_env_perms), + "secretName": secret_name or f"{user.slug}-dummy", + "images": user_images, + "dbtHome": env.dbt_home_path, + "profile": env.profile.slug, + "cloneRepository": str(env.profile.clone_repository).lower(), + "codeServerDisabled": str(not is_code_server_enabled).lower(), + "codeServerAccess": ( + ue.code_server_access + if code_server_shareable + else ue.ACCESS_PRIVATE + ), + "localAirflowEnabled": str( + env.cluster.is_feature_enabled("local_airflow") + and is_code_server_enabled + and ue.code_server_local_airflow_active + ).lower(), + "codeServerShareCode": ue.code_server_share_code, + "codeServerExposures": ue.exposures if code_server_exposures else {}, + "localDbtDocsDisabled": str(not env.profile.dbt_local_docs).lower(), + "dbtSyncServerDisabled": str(not env.profile.dbt_sync).lower(), + "codeServerRestartedAt": ue.code_server_restarted_at, + "services": gen_workspace_spec_services(env), + "localAirflowEnvironment": CodeServerAdapter.get_env_vars( + env=env, + user=user, + ), + } + ) + + return users + + +def gen_workspace_user_permission_names( + ue: UserEnvironment, user_env_perms: dict = None +): + """ + If user_env_perms is provided as a mapping of a user to a list of + permissions objects, that will be used instead of an individual query + here. 
+ """ + + # TODO: Treat read and write permissions differently + if user_env_perms is not None: + permission_names = user_env_perms.get(ue.user.id, []) + else: + permission_names = ue.user.get_environment_permission_names(ue.env) + + resource_re = r"workbench\:([a-z-_]+)[\:]?[a-z-_]*\|(?:read|write)" + services = {re.search(resource_re, p).group(1) for p in permission_names} + # removing code-server and local-dbt-docs if no more licenses available + if ue.user in ue.env.project.account.developers_without_license: + services -= set(settings.USER_SERVICES) + # The result needs to be stable to avoid triggering false changes in k8s + # resources, so we sort the permissions. + return [{"service": service} for service in sorted(services)] + + +def gen_namespace(ns_name): + return make.namespace(ns_name) + + +def gen_base_config(env: Environment): + res = [] + + if env.docker_config: + res.append(gen_docker_config_secret(env)) + + return res + + +def gen_docker_config_secret(env: Environment): + # The docker-config secret is the only one left using the annotation to + # trigger a reconciliation by the operator. The rest all trigger changes + # through hashed name changes that change the workspace "configs" field. + return make.docker_config_secret( + name=env.docker_config_secret_name, + annotations={"datacoves.com/workspace": workspace_name(env)}, + data=env.docker_config, + ) + + +def gen_resource_requirements(env: Environment, env_users): + cluster = env.cluster + if not cluster.defines_resource_requests: + return {} + + dbt_docs_resources = {"dbt-docs": env.dbt_docs_config.get("resources", {})} + + pomerium_req = 200 + if env.is_service_enabled(settings.SERVICE_CODE_SERVER): + # Request more memory every time 20 new users are added to the dev environment + users_mem = POMERIUM_MEM_REQ_PER_USER * len(env_users) + pomerium_req += users_mem - (users_mem % (20 * POMERIUM_MEM_REQ_PER_USER)) + pomerium_limit = pomerium_req + 200 + + resources = { + "pomerium": { + "requests": {"memory": f"{pomerium_req}Mi", "cpu": "100m"}, + "limits": {"memory": f"{pomerium_limit}Mi", "cpu": "200m"}, + }, + "code-server": env.code_server_config.get( + "resources", CodeServerAdapter.code_server_resources_default + ), + "code-server-dbt-docs": env.code_server_config.get( + "resources-dbt-docs", CodeServerAdapter.dbt_docs_resources_default + ), + "code-server-dbt-core-interface": env.code_server_config.get( + "resources-dbt-core-interface", + CodeServerAdapter.dbt_core_interface_resources_default, + ), + "dbt-docs": { + "requests": {"memory": "200Mi", "cpu": "50m"}, + "limits": {"memory": "700Mi", "cpu": "300m"}, + }, + "code-server-local-airflow": env.code_server_config.get( + "resources_local_airflow", CodeServerAdapter.local_airflow_resources_default + ), + } + + return deep_merge(dbt_docs_resources, resources) diff --git a/src/core/api/app/codegen/__init__.py b/src/core/api/app/codegen/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/codegen/admin.py b/src/core/api/app/codegen/admin.py new file mode 100644 index 00000000..fb4468e9 --- /dev/null +++ b/src/core/api/app/codegen/admin.py @@ -0,0 +1,85 @@ +from django import forms +from django.contrib import admin +from django.db import models +from django_json_widget.widgets import JSONEditorWidget +from django_object_actions import DjangoObjectActions +from projects.cryptography import RSA_KEY_TYPE, generate_ssl_key_pair + +from datacoves.admin import BaseModelAdmin + +from .models import SQLHook, Template + + 
+@admin.register(Template) +class TemplateAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ("name", "format", "context_type", "account", "is_system_template") + list_filter = ("account",) + readonly_fields = ("slug", "created_by", "updated_by") + search_fields = ("name", "description", "account__name") + + def formfield_for_dbfield(self, db_field, request, **kwargs): + if db_field.name == "enabled_for": + return forms.MultipleChoiceField( + choices=Template.USAGES, + widget=forms.CheckboxSelectMultiple, + required=False, + help_text="Select the models that can access this template.", + ) + return super().formfield_for_dbfield(db_field, request, **kwargs) + + def save_model(self, request, obj, form, change): + if change: + obj.updated_by = request.user + else: + obj.created_by = request.user + obj.save() + + @admin.display(boolean=True) + def is_system_template(self, obj): + return obj.is_system_template + + +@admin.register(SQLHook) +class SQLHookAdmin(BaseModelAdmin, DjangoObjectActions, admin.ModelAdmin): + def generate_keypair(self, request, obj): + keypair = generate_ssl_key_pair(RSA_KEY_TYPE) + + if not obj.connection_overrides: + obj.connection_overrides = {"private_key": keypair["private"]} + + else: + obj.connection_overrides["private_key"] = keypair["private"] + + obj.public_key = keypair["public"] + obj.save() + + generate_keypair.label = "Generate Keypair" + generate_keypair.short_description = "Generate an SSL keypair for snowflake" + + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + list_display = ( + "name", + "account", + "project", + "environment", + "connection_type", + "enabled", + "is_system_sqlhook", + ) + list_filter = ("account",) + readonly_fields = ("slug", "created_by", "updated_by") + search_fields = ("name", "account__name", "project__name", "environment__name") + change_actions = ("generate_keypair",) + + def save_model(self, request, obj, form, change): + if change: + obj.updated_by = request.user + else: + obj.created_by = request.user + obj.save() + + @admin.display(boolean=True) + def is_system_sqlhook(self, obj): + return obj.is_system_sqlhook diff --git a/src/core/api/app/codegen/apps.py b/src/core/api/app/codegen/apps.py new file mode 100644 index 00000000..983834ae --- /dev/null +++ b/src/core/api/app/codegen/apps.py @@ -0,0 +1,9 @@ +from django.apps import AppConfig + + +class CodegenConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "codegen" + + def ready(self): + from . 
import signals # noqa F401 diff --git a/src/core/api/app/codegen/migrations/0001_initial.py b/src/core/api/app/codegen/migrations/0001_initial.py new file mode 100644 index 00000000..c063064a --- /dev/null +++ b/src/core/api/app/codegen/migrations/0001_initial.py @@ -0,0 +1,37 @@ +# Generated by Django 3.2.6 on 2022-07-07 22:26 + +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('users', '0002_user_is_service_account'), + ] + + operations = [ + migrations.CreateModel( + name='Template', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('name', models.CharField(max_length=250)), + ('description', models.TextField()), + ('content', models.TextField()), + ('is_global', models.BooleanField(default=False, help_text='Global templates are available on all accounts')), + ('context_type', models.CharField(choices=[('user_credential', 'User credential')], max_length=30)), + ('format', models.CharField(choices=[('json', 'JSON'), ('yaml', 'YAML'), ('python', 'Python'), ('sql', 'SQL'), ('sql_snowflake', 'Snowflake SQL'), ('sql_redshift', 'Redshift SQL'), ('sql_bigquery', 'BigQuery SQL')], max_length=30)), + ('account', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='users.account')), + ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_templates', to=settings.AUTH_USER_MODEL)), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/src/core/api/app/codegen/migrations/0002_sqlhook.py b/src/core/api/app/codegen/migrations/0002_sqlhook.py new file mode 100644 index 00000000..646daaa5 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0002_sqlhook.py @@ -0,0 +1,35 @@ +# Generated by Django 3.2.6 on 2022-07-07 23:05 + +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0033_auto_20220707_1724'), + ('users', '0002_user_is_service_account'), + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('codegen', '0001_initial'), + ] + + operations = [ + migrations.CreateModel( + name='SQLHook', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('name', models.CharField(max_length=250)), + ('connection_details', models.JSONField(default=dict)), + ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.account')), + ('connection_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sql_hooks', to='projects.connectiontype')), + ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='created_sql_hooks', to=settings.AUTH_USER_MODEL)), + ('template', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='codegen.template')), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/src/core/api/app/codegen/migrations/0003_sqlhook_trigger.py 
b/src/core/api/app/codegen/migrations/0003_sqlhook_trigger.py new file mode 100644 index 00000000..bc981140 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0003_sqlhook_trigger.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-07-18 15:58 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0002_sqlhook'), + ] + + operations = [ + migrations.AddField( + model_name='sqlhook', + name='trigger', + field=models.CharField(choices=[('user_credential_post_save', 'User credential post save')], default='user_credential_post_save', max_length=50), + preserve_default=False, + ), + ] diff --git a/src/core/api/app/codegen/migrations/0004_alter_template_context_type.py b/src/core/api/app/codegen/migrations/0004_alter_template_context_type.py new file mode 100644 index 00000000..60308d42 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0004_alter_template_context_type.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-07-20 14:54 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0003_sqlhook_trigger'), + ] + + operations = [ + migrations.AlterField( + model_name='template', + name='context_type', + field=models.CharField(choices=[('user_credential', 'User credential'), ('dbt_profiles', 'DBT profiles')], max_length=30), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0004_auto_20220720_1914.py b/src/core/api/app/codegen/migrations/0004_auto_20220720_1914.py new file mode 100644 index 00000000..3305946d --- /dev/null +++ b/src/core/api/app/codegen/migrations/0004_auto_20220720_1914.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-07-20 19:14 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0003_sqlhook_trigger'), + ] + + operations = [ + migrations.AddField( + model_name='sqlhook', + name='enabled', + field=models.BooleanField(default=False), + ), + migrations.AlterField( + model_name='template', + name='description', + field=models.TextField(blank=True, null=True), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0005_merge_20220721_1352.py b/src/core/api/app/codegen/migrations/0005_merge_20220721_1352.py new file mode 100644 index 00000000..cd0edfcd --- /dev/null +++ b/src/core/api/app/codegen/migrations/0005_merge_20220721_1352.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.6 on 2022-07-21 13:52 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0004_alter_template_context_type'), + ('codegen', '0004_auto_20220720_1914'), + ] + + operations = [ + ] diff --git a/src/core/api/app/codegen/migrations/0006_alter_template_context_type.py b/src/core/api/app/codegen/migrations/0006_alter_template_context_type.py new file mode 100644 index 00000000..6b518b40 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0006_alter_template_context_type.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-07-21 19:05 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0005_merge_20220721_1352'), + ] + + operations = [ + migrations.AlterField( + model_name='template', + name='context_type', + field=models.CharField(choices=[('user_credential', 'User credential'), ('user_credentials', 'User credentials')], max_length=30), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0007_auto_20220722_1510.py 
b/src/core/api/app/codegen/migrations/0007_auto_20220722_1510.py new file mode 100644 index 00000000..38fae763 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0007_auto_20220722_1510.py @@ -0,0 +1,26 @@ +# Generated by Django 3.2.6 on 2022-07-22 15:10 + +import autoslug.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0006_alter_template_context_type'), + ] + + operations = [ + migrations.AddField( + model_name='sqlhook', + name='slug', + field=autoslug.fields.AutoSlugField(default='', editable=False, populate_from='name', unique=True), + preserve_default=False, + ), + migrations.AddField( + model_name='template', + name='slug', + field=autoslug.fields.AutoSlugField(default='', editable=False, populate_from='name', unique=True), + preserve_default=False, + ), + ] diff --git a/src/core/api/app/codegen/migrations/0008_remove_template_is_global.py b/src/core/api/app/codegen/migrations/0008_remove_template_is_global.py new file mode 100644 index 00000000..9137fd89 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0008_remove_template_is_global.py @@ -0,0 +1,17 @@ +# Generated by Django 3.2.6 on 2022-07-22 19:22 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0007_auto_20220722_1510'), + ] + + operations = [ + migrations.RemoveField( + model_name='template', + name='is_global', + ), + ] diff --git a/src/core/api/app/codegen/migrations/0009_alter_sqlhook_trigger.py b/src/core/api/app/codegen/migrations/0009_alter_sqlhook_trigger.py new file mode 100644 index 00000000..67ffdab3 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0009_alter_sqlhook_trigger.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-07-27 16:39 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0008_remove_template_is_global'), + ] + + operations = [ + migrations.AlterField( + model_name='sqlhook', + name='trigger', + field=models.CharField(choices=[('user_credential_post_save', 'User credential post save'), ('user_credential_pre_save', 'User credential pre save')], max_length=50), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0010_auto_20220803_2044.py b/src/core/api/app/codegen/migrations/0010_auto_20220803_2044.py new file mode 100644 index 00000000..e3043bf4 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0010_auto_20220803_2044.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-08-03 20:44 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0009_alter_sqlhook_trigger'), + ] + + operations = [ + migrations.AlterField( + model_name='template', + name='context_type', + field=models.CharField(choices=[('none', 'No context'), ('user_credential', 'User credential'), ('user_credentials', 'User credentials')], max_length=30), + ), + migrations.AlterField( + model_name='template', + name='format', + field=models.CharField(choices=[('json', 'JSON'), ('yaml', 'YAML'), ('python', 'Python'), ('bash', 'Bash'), ('sql', 'SQL'), ('sql_snowflake', 'Snowflake SQL'), ('sql_redshift', 'Redshift SQL'), ('sql_bigquery', 'BigQuery SQL')], max_length=30), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0011_alter_template_format.py b/src/core/api/app/codegen/migrations/0011_alter_template_format.py new file mode 100644 index 00000000..9d1377d7 --- /dev/null +++ 
b/src/core/api/app/codegen/migrations/0011_alter_template_format.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-08-17 19:56 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0010_auto_20220803_2044'), + ] + + operations = [ + migrations.AlterField( + model_name='template', + name='format', + field=models.CharField(choices=[('json', 'JSON'), ('yaml', 'YAML'), ('python', 'Python'), ('bash', 'Bash'), ('sql', 'SQL'), ('sql_snowflake', 'Snowflake SQL'), ('sql_redshift', 'Redshift SQL'), ('sql_bigquery', 'BigQuery SQL'), ('sqlfluff', 'Sqlfluff'), ('sqlfluffignore', 'Sqlfluff Ignore')], max_length=30), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0012_alter_template_format.py b/src/core/api/app/codegen/migrations/0012_alter_template_format.py new file mode 100644 index 00000000..2bb1380f --- /dev/null +++ b/src/core/api/app/codegen/migrations/0012_alter_template_format.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-08-19 13:39 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0011_alter_template_format'), + ] + + operations = [ + migrations.AlterField( + model_name='template', + name='format', + field=models.CharField(choices=[('json', 'JSON'), ('yaml', 'YAML'), ('python', 'Python'), ('bash', 'Bash'), ('sql', 'SQL'), ('sql_snowflake', 'Snowflake SQL'), ('sql_redshift', 'Redshift SQL'), ('sql_bigquery', 'BigQuery SQL'), ('sqlfluff', 'Sqlfluff'), ('sqlfluffignore', 'Sqlfluff Ignore'), ('html', 'HTML')], max_length=30), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0013_alter_template_context_type.py b/src/core/api/app/codegen/migrations/0013_alter_template_context_type.py new file mode 100644 index 00000000..6ef96c86 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0013_alter_template_context_type.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-09-05 15:36 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0012_alter_template_format'), + ] + + operations = [ + migrations.AlterField( + model_name='template', + name='context_type', + field=models.CharField(choices=[('none', 'No context'), ('user_credential', 'User credential'), ('user_credentials', 'User credentials'), ('environment', 'Environment')], max_length=30), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0014_auto_20220920_1805.py b/src/core/api/app/codegen/migrations/0014_auto_20220920_1805.py new file mode 100644 index 00000000..484427ae --- /dev/null +++ b/src/core/api/app/codegen/migrations/0014_auto_20220920_1805.py @@ -0,0 +1,25 @@ +# Generated by Django 3.2.6 on 2022-09-20 18:05 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0066_connection_connection_user'), + ('codegen', '0013_alter_template_context_type'), + ] + + operations = [ + migrations.AddField( + model_name='sqlhook', + name='environment', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='projects.environment'), + ), + migrations.AddField( + model_name='sqlhook', + name='project', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='projects.project'), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0015_auto_20220928_1555.py 
b/src/core/api/app/codegen/migrations/0015_auto_20220928_1555.py new file mode 100644 index 00000000..14fc2ecc --- /dev/null +++ b/src/core/api/app/codegen/migrations/0015_auto_20220928_1555.py @@ -0,0 +1,38 @@ +# Generated by Django 3.2.6 on 2022-09-28 15:55 + +import autoslug.fields +import codegen.models +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('codegen', '0014_auto_20220920_1805'), + ] + + operations = [ + migrations.AlterField( + model_name='sqlhook', + name='created_by', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='created_sql_hooks', to=settings.AUTH_USER_MODEL), + ), + migrations.AlterField( + model_name='sqlhook', + name='slug', + field=autoslug.fields.AutoSlugField(editable=False, populate_from=codegen.models.sqlhook_slug, unique=True), + ), + migrations.AlterField( + model_name='template', + name='created_by', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='created_templates', to=settings.AUTH_USER_MODEL), + ), + migrations.AlterField( + model_name='template', + name='slug', + field=autoslug.fields.AutoSlugField(editable=False, populate_from=codegen.models.template_slug, unique=True), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0016_auto_20230206_1930.py b/src/core/api/app/codegen/migrations/0016_auto_20230206_1930.py new file mode 100644 index 00000000..212074d0 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0016_auto_20230206_1930.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.16 on 2023-02-06 19:30 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0015_auto_20220928_1555'), + ] + + operations = [ + migrations.AlterField( + model_name='template', + name='context_type', + field=models.CharField(choices=[('none', 'No context'), ('user_credential', 'User credential'), ('user_credentials', 'User credentials'), ('environment', 'Environment'), ('user', 'User')], help_text='Fields per context: user (email, name, username, slug), user credenial (user, ssl_public_key), user credenials (environment, connections), environment (dbt_home_path, type, slug, settings)', max_length=30), + ), + migrations.AlterField( + model_name='template', + name='format', + field=models.CharField(choices=[('json', 'JSON'), ('yaml', 'YAML'), ('python', 'Python'), ('bash', 'Bash'), ('sql', 'SQL'), ('sql_snowflake', 'Snowflake SQL'), ('sql_redshift', 'Redshift SQL'), ('sql_bigquery', 'BigQuery SQL'), ('sqlfluff', 'Sqlfluff'), ('sqlfluffignore', 'Sqlfluff Ignore'), ('html', 'HTML'), ('ini', 'INI config')], max_length=30), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0017_alter_template_format.py b/src/core/api/app/codegen/migrations/0017_alter_template_format.py new file mode 100644 index 00000000..3cd7415d --- /dev/null +++ b/src/core/api/app/codegen/migrations/0017_alter_template_format.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-02-22 21:37 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0016_auto_20230206_1930'), + ] + + operations = [ + migrations.AlterField( + model_name='template', + name='format', + field=models.CharField(choices=[('none', 'No format'), ('json', 'JSON'), ('yaml', 'YAML'), 
('python', 'Python'), ('bash', 'Bash'), ('sql', 'SQL'), ('sql_snowflake', 'Snowflake SQL'), ('sql_redshift', 'Redshift SQL'), ('sql_bigquery', 'BigQuery SQL'), ('sqlfluff', 'Sqlfluff'), ('sqlfluffignore', 'Sqlfluff Ignore'), ('html', 'HTML'), ('ini', 'INI config')], max_length=30), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0018_auto_20230228_1248.py b/src/core/api/app/codegen/migrations/0018_auto_20230228_1248.py new file mode 100644 index 00000000..31b230fa --- /dev/null +++ b/src/core/api/app/codegen/migrations/0018_auto_20230228_1248.py @@ -0,0 +1,40 @@ +# Generated by Django 3.2.16 on 2023-02-28 12:48 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0076_update_account_permissions'), + ('codegen', '0017_alter_template_format'), + ] + + operations = [ + migrations.AddField( + model_name='sqlhook', + name='connection_template', + field=models.ForeignKey(blank=True, help_text='If specified, this hooks will run on selected connection template only.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='projects.connectiontemplate'), + ), + migrations.AlterField( + model_name='sqlhook', + name='environment', + field=models.ForeignKey(blank=True, help_text='If specified, this hooks will run on selected environment only.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='projects.environment'), + ), + migrations.AlterField( + model_name='sqlhook', + name='project', + field=models.ForeignKey(blank=True, help_text='If specified, this hooks will run on selected project only.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='projects.project'), + ), + migrations.AlterField( + model_name='sqlhook', + name='template', + field=models.ForeignKey(help_text='Template used to render the sql that will be ran.', on_delete=django.db.models.deletion.CASCADE, to='codegen.template'), + ), + migrations.AlterField( + model_name='sqlhook', + name='trigger', + field=models.CharField(choices=[('user_credential_post_save', 'User credential post save'), ('user_credential_pre_save', 'User credential pre save')], help_text='Specifies the event that triggers this hook.', max_length=50), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0019_auto_20230228_1415.py b/src/core/api/app/codegen/migrations/0019_auto_20230228_1415.py new file mode 100644 index 00000000..62bcf732 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0019_auto_20230228_1415.py @@ -0,0 +1,22 @@ +# Generated by Django 3.2.16 on 2023-02-28 14:15 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0018_auto_20230228_1248'), + ] + + operations = [ + migrations.RemoveField( + model_name='sqlhook', + name='connection_template', + ), + migrations.AddField( + model_name='sqlhook', + name='connection_templates', + field=models.JSONField(blank=True, default=list, null=True), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0020_auto_20231017_1950.py b/src/core/api/app/codegen/migrations/0020_auto_20231017_1950.py new file mode 100644 index 00000000..ee1db0ce --- /dev/null +++ b/src/core/api/app/codegen/migrations/0020_auto_20231017_1950.py @@ -0,0 +1,26 @@ +# Generated by Django 3.2.20 on 2023-10-17 19:50 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + 
migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('codegen', '0019_auto_20230228_1415'), + ] + + operations = [ + migrations.AddField( + model_name='sqlhook', + name='updated_by', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='updated_sql_hooks', to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='template', + name='updated_by', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='updated_templates', to=settings.AUTH_USER_MODEL), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0021_rename_connection_details_sqlhook_connection_overrides.py b/src/core/api/app/codegen/migrations/0021_rename_connection_details_sqlhook_connection_overrides.py new file mode 100644 index 00000000..1d25d900 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0021_rename_connection_details_sqlhook_connection_overrides.py @@ -0,0 +1,27 @@ +# Generated by Django 3.2.20 on 2023-11-13 19:19 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("codegen", "0020_auto_20231017_1950"), + ] + + operations = [ + migrations.RenameField( + model_name="sqlhook", + old_name="connection_details", + new_name="connection_overrides", + ), + migrations.AlterField( + model_name="sqlhook", + name="connection_overrides", + field=models.JSONField( + blank=True, + default=dict, + help_text="Default connection info is taken from trigger context, this dict overrides them.", + null=True, + ), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0022_auto_20240701_2118.py b/src/core/api/app/codegen/migrations/0022_auto_20240701_2118.py new file mode 100644 index 00000000..1878e067 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0022_auto_20240701_2118.py @@ -0,0 +1,34 @@ +# Generated by Django 3.2.20 on 2024-07-01 21:18 + +import autoslug.fields +import codegen.models.template +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('users', '0021_auto_20240701_2118'), + ('codegen', '0021_rename_connection_details_sqlhook_connection_overrides'), + ] + + operations = [ + migrations.AlterField( + model_name='template', + name='account', + field=models.ForeignKey(blank=True, help_text="If null, this is a system template. System templates cannot be modified by users. This isn't enforced by this field, though; it is enforced by created_by being null.", null=True, on_delete=django.db.models.deletion.CASCADE, to='users.account'), + ), + migrations.AlterField( + model_name='template', + name='created_by', + field=models.ForeignKey(blank=True, help_text='If this is null, then it is a system template.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='created_templates', to=settings.AUTH_USER_MODEL), + ), + migrations.AlterField( + model_name='template', + name='slug', + field=autoslug.fields.AutoSlugField(editable=False, help_text="Automatically generated. If this is a global template, it is the template name with spaces turned into hypens. If this is an account template, the account's slug is appended. 
See AutoSlugField documentation for details on how the slugs are generated.", populate_from=codegen.models.template.template_slug, unique=True), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0023_template_enabled_for.py b/src/core/api/app/codegen/migrations/0023_template_enabled_for.py new file mode 100644 index 00000000..1c5693e4 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0023_template_enabled_for.py @@ -0,0 +1,21 @@ +# Generated by Django 5.0.7 on 2024-08-30 12:48 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("codegen", "0022_auto_20240701_2118"), + ] + + operations = [ + migrations.AddField( + model_name="template", + name="enabled_for", + field=models.JSONField( + default=list, + help_text="A JSON object that defines which classes can access this template.", + ), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0024_alter_template_enabled_for.py b/src/core/api/app/codegen/migrations/0024_alter_template_enabled_for.py new file mode 100644 index 00000000..5de7b9bb --- /dev/null +++ b/src/core/api/app/codegen/migrations/0024_alter_template_enabled_for.py @@ -0,0 +1,18 @@ +# Generated by Django 5.0.7 on 2024-09-19 22:32 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0023_template_enabled_for'), + ] + + operations = [ + migrations.AlterField( + model_name='template', + name='enabled_for', + field=models.JSONField(default=list, help_text='List that defines which classes can access this template.'), + ), + ] diff --git a/src/core/api/app/codegen/migrations/0025_sqlhook_public_key.py b/src/core/api/app/codegen/migrations/0025_sqlhook_public_key.py new file mode 100644 index 00000000..82b40129 --- /dev/null +++ b/src/core/api/app/codegen/migrations/0025_sqlhook_public_key.py @@ -0,0 +1,18 @@ +# Generated by Django 5.0.7 on 2025-04-21 22:55 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0024_alter_template_enabled_for'), + ] + + operations = [ + migrations.AddField( + model_name='sqlhook', + name='public_key', + field=models.TextField(null=True), + ), + ] diff --git a/src/core/api/app/codegen/migrations/__init__.py b/src/core/api/app/codegen/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/codegen/models/__init__.py b/src/core/api/app/codegen/models/__init__.py new file mode 100644 index 00000000..0f886d11 --- /dev/null +++ b/src/core/api/app/codegen/models/__init__.py @@ -0,0 +1,2 @@ +from .sql_hook import * # noqa: F401,F403 +from .template import * # noqa: F401,F403 diff --git a/src/core/api/app/codegen/models/sql_hook.py b/src/core/api/app/codegen/models/sql_hook.py new file mode 100644 index 00000000..cfe40ee1 --- /dev/null +++ b/src/core/api/app/codegen/models/sql_hook.py @@ -0,0 +1,185 @@ +from autoslug import AutoSlugField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.core.exceptions import ValidationError +from django.db import models +from projects.models import ConnectionTemplate +from users.models import Account + +from lib.dicts import deep_merge + +from ..sql_runners import run_on_sql_runner +from .template import Template + + +def sqlhook_slug(instance): + return f"{instance.name}-{instance.account.slug}" + + +class SQLHook(AuditModelMixin, DatacovesModel): + """Run SQL based on system events + 
+ This can be used to run arbitrary SQL statements based on different + triggers. The SQL statements are in :model:`codegen.Template` model + objects and can have replacement variables in them. + + There are a lot of validation rules around what combination of fields + are allowed; if this topic is relevant to what you're doing, it is + recommended you read the 'clean' method of this class. + + ========= + CONSTANTS + ========= + + - TRIGGER_USER_CREDENTIAL_POST_SAVE + - TRIGGER_USER_CREDENTIAL_PRE_SAVE + + ======= + METHODS + ======= + + - **clean()** - A private method for doing validation checks on save + - **save(...)** - Overrides save in order to run clean() + - **run(context, base_connection)** - Runs the hook + - **render(context)** - Renders the SQL for the hook. This is mostly + used by run(...) but could be used in other places as needed. + """ + + TRIGGER_USER_CREDENTIAL_POST_SAVE = "user_credential_post_save" + TRIGGER_USER_CREDENTIAL_PRE_SAVE = "user_credential_pre_save" + TRIGGERS = ( + (TRIGGER_USER_CREDENTIAL_POST_SAVE, "User credential post save"), + (TRIGGER_USER_CREDENTIAL_PRE_SAVE, "User credential pre save"), + ) + + name = models.CharField(max_length=250) + slug = AutoSlugField(populate_from=sqlhook_slug, unique=True) + trigger = models.CharField( + max_length=50, + choices=TRIGGERS, + help_text="Specifies the event that triggers this hook.", + ) + created_by = models.ForeignKey( + settings.AUTH_USER_MODEL, + on_delete=models.SET_NULL, + related_name="created_sql_hooks", + blank=True, + null=True, + ) + updated_by = models.ForeignKey( + settings.AUTH_USER_MODEL, + on_delete=models.SET_NULL, + related_name="updated_sql_hooks", + blank=True, + null=True, + ) + template = models.ForeignKey( + Template, + on_delete=models.CASCADE, + help_text="Template used to render the sql that will be ran.", + ) + account = models.ForeignKey(Account, on_delete=models.CASCADE) + project = models.ForeignKey( + "projects.Project", + on_delete=models.SET_NULL, + null=True, + blank=True, + help_text="If specified, this hooks will run on selected project only.", + ) + environment = models.ForeignKey( + "projects.Environment", + on_delete=models.SET_NULL, + null=True, + blank=True, + help_text="If specified, this hooks will run on selected environment only.", + ) + connection_templates = models.JSONField(default=list, null=True, blank=True) + connection_overrides = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="Default connection info is taken from trigger context, this dict overrides them.", + ) + connection_type = models.ForeignKey( + "projects.ConnectionType", + on_delete=models.CASCADE, + related_name="sql_hooks", + ) + enabled = models.BooleanField(default=False) + public_key = models.TextField(null=True) + + def __str__(self): + return self.name + + @property + def is_system_sqlhook(self) -> bool: + """System SQL hooks are created_by None""" + + return self.created_by is None + + def clean(self): # noqa: C901 + """Run validation against fields pre-save. The validation rules are + fairly complicated and quite easy to read, so this comment will not + take a deep dive. 
This may raise a ValidationError""" + + if ( + self.trigger == self.TRIGGER_USER_CREDENTIAL_POST_SAVE + and self.template.context_type != Template.CONTEXT_TYPE_USER_CREDENTIAL + ): + raise ValidationError( + f"Hooks triggered on '{self.trigger}' can only be associated to templates " + f"with context type '{Template.CONTEXT_TYPE_USER_CREDENTIAL}'" + ) + if self.project and self.project.account != self.account: + raise ValidationError("Project must belong to selected account.") + if self.environment and self.environment.project.account != self.account: + raise ValidationError("Environment must belong to selected account.") + if self.connection_templates: + conn_templates = ConnectionTemplate.objects.filter( + id__in=self.connection_templates + ) + if conn_templates.count() != len(self.connection_templates): + raise ValidationError("Connection template ids not found") + for conn_template in conn_templates: + if conn_template.project.account != self.account: + raise ValidationError( + "Connection template must belong to selected account." + ) + if self.project and self.environment: + raise ValidationError( + "Choose either a project or an environment, not both." + ) + if self.project and self.connection_templates: + raise ValidationError( + "Choose either a project or connection templates, not both." + ) + + # This was in its own method just to avoid a "method too complex" + # error from flake8; however, I believe that actually makes the + # method more complex because it puts a couple checks up in another + # method to satisfy an arbitrary requirement. Let's, instead, ignore + # the flake8 error here. + if self.pk and self.is_system_sqlhook and self.updated_by: + old_sh = SQLHook.objects.get(pk=self.pk) + if old_sh.enabled == self.enabled: + # If other than the enabled feature was changed + raise ValidationError( + "SQL Hooks created by system can not be modified." + ) + self.template.is_enabled_for(__class__.__name__) + + def run(self, context: dict, base_connection: dict): + """Runs the hook using a context""" + sql_script = self.render(context) + if sql_script.strip(): + conn_data = deep_merge(self.connection_overrides, base_connection) + run_on_sql_runner(conn_data, sql_script, self.connection_type.slug) + + def save(self, *args, **kwargs): + self.clean() + return super().save(*args, **kwargs) + + def render(self, context): + """Render the SQL query with the given context""" + return self.template.render(context) diff --git a/src/core/api/app/codegen/models/template.py b/src/core/api/app/codegen/models/template.py new file mode 100644 index 00000000..1aceaea3 --- /dev/null +++ b/src/core/api/app/codegen/models/template.py @@ -0,0 +1,276 @@ +from autoslug import AutoSlugField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.core.exceptions import ValidationError +from django.db import models +from users.models import Account + +from ..templating import render_template + + +def template_slug(instance): + if instance.account: + return f"{instance.name}-{instance.account.slug}" + else: + return instance.name + + +class Template(AuditModelMixin, DatacovesModel): + """Templates are used to generate files on customer instances + + This is, for now, only used by Code Server but could be used by anything. + It is for files like .bashrc, .gitconfig, pre-commit hooks, etc. 
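As a concrete, hypothetical illustration of the SQLHook model above (not part of this diff): a hook bound to the post-save trigger renders its template against the trigger context and hands the merged connection details to the SQL runner. Here `account` and `snowflake_type` stand in for pre-existing `users.Account` and `projects.ConnectionType` rows, and every literal value is made up.

```python
# Sketch only -- object values are hypothetical, not from this diff.
from codegen.models import SQLHook, Template

template = Template.objects.create(
    name="Grant role on signup",
    content="GRANT ROLE analyst TO USER {{ user }};",
    context_type=Template.CONTEXT_TYPE_USER_CREDENTIAL,
    format=Template.FORMAT_SQL_SNOWFLAKE,
    enabled_for=[Template.USAGE_SQLHOOKS],   # required for SQLHook.clean() to accept it
    account=account,                         # an existing users.Account
)

hook = SQLHook(
    name="grant-analyst",
    trigger=SQLHook.TRIGGER_USER_CREDENTIAL_POST_SAVE,
    template=template,
    account=account,
    connection_type=snowflake_type,          # an existing projects.ConnectionType
    connection_overrides={"role": "SECURITYADMIN"},
    enabled=True,
)
hook.save()   # save() calls clean(), which applies the validation rules described above

# run() renders the template, overlays connection_overrides on the base connection
# taken from the trigger context, and dispatches the SQL to the runner matching
# connection_type.slug.
hook.run(
    context={"user": "jane.doe", "account": "acme", "ssl_public_key": ""},
    base_connection={"user": "svc_datacoves", "account": "acme", "role": "PUBLIC"},
)
```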
+ + Context Type determines which variables are available to the user; + the selection logic is in the Code Server adapator, but the variables + available are easily seen in codegen.templating + + It would make more sense to bring the 'business logic' of template + context selection into the model rather than have the logic in code + server's adaptor, which would make Templates more independent, but there + is no demand for this right now so we'll leave it how it is. + + Templates can be system templates (is_system_template returns True; + this is currently based on created_by being None) or account templates. + Account templates override system templates. System templates cannot be + modified by users. + + ========= + Constants + ========= + + ------- + Context + ------- + + Context controls which variables are available to the template. + + - CONTEXT_TYPE_NONE - gets no variables + - CONTEXT_TYPE_USER_CREDENTIAL - user, account, ssl_public_key + - CONTEXT_TYPE_USER_CREDENTIALS - connections, environment ... + connections is a list of dictionaries with 'name', 'slug', + 'type', and 'ssl_public_key' from user.credentials.combined_onnection() + ... environment is the contents of CONTEXT_TYPE_ENVIRONMENT below. + - CONTEXT_TYPE_ENVIRONMENT - dbt_home_path, type, slug, settings, + release_profile, profile_flags, dbt_profile, protected_branch + - CONTEXT_TYPES - a tuple of tuple pairs for populating a select box + + ------- + Formats + ------- + + Format is currently used to determine what kind of comment we can + inject into a file. See the embedded_comment property method. + + - FORMAT_NONE + - FORMAT_JSON + - FORMAT_YAML + - FORMAT_PYTHON + - FORMAT_BASH + - FORMAT_SQL + - FORMAT_SQL_SNOWFLAKE + - FORMAT_SQL_REDSHIFT + - FORMAT_SQL_BIGQUERY + - FORMAT_SQLFLUFF + - FORMAT_SQLFLUFF_IGNORE + - FORMAT_HTML + - FORMAT_INI + - FORMATS - tuple of tuple pairs for display in a select box + + ======= + Methods + ======= + + - **clean()** - Basically a private method, used for validation. + - **save(...)** - Overriden to support clean's validation. 
+ - **render(context)** - Renders the template and returns it + """ + + CONTEXT_TYPE_NONE = "none" + CONTEXT_TYPE_USER_CREDENTIAL = "user_credential" + CONTEXT_TYPE_USER_CREDENTIALS = "user_credentials" + CONTEXT_TYPE_ENVIRONMENT = "environment" + CONTEXT_TYPE_USER = "user" + CONTEXT_TYPES = ( + ( + CONTEXT_TYPE_NONE, + "No context", + ), + ( + CONTEXT_TYPE_USER_CREDENTIAL, + "User credential", + ), + ( + CONTEXT_TYPE_USER_CREDENTIALS, + "User credentials", + ), + ( + CONTEXT_TYPE_ENVIRONMENT, + "Environment", + ), + ( + CONTEXT_TYPE_USER, + "User", + ), + ) + + FORMAT_NONE = "none" + FORMAT_JSON = "json" + FORMAT_YAML = "yaml" + FORMAT_PYTHON = "python" + FORMAT_BASH = "bash" + FORMAT_SQL = "sql" + FORMAT_SQL_SNOWFLAKE = "sql_snowflake" + FORMAT_SQL_REDSHIFT = "sql_redshift" + FORMAT_SQL_BIGQUERY = "sql_bigquery" + FORMAT_SQLFLUFF = "sqlfluff" + FORMAT_SQLFLUFF_IGNORE = "sqlfluffignore" + FORMAT_HTML = "html" + FORMAT_INI = "ini" + FORMATS = ( + ( + FORMAT_NONE, + "No format", + ), + ( + FORMAT_JSON, + "JSON", + ), + ( + FORMAT_YAML, + "YAML", + ), + ( + FORMAT_PYTHON, + "Python", + ), + ( + FORMAT_BASH, + "Bash", + ), + ( + FORMAT_SQL, + "SQL", + ), + ( + FORMAT_SQL_SNOWFLAKE, + "Snowflake SQL", + ), + ( + FORMAT_SQL_REDSHIFT, + "Redshift SQL", + ), + ( + FORMAT_SQL_BIGQUERY, + "BigQuery SQL", + ), + (FORMAT_SQLFLUFF, "Sqlfluff"), + (FORMAT_SQLFLUFF_IGNORE, "Sqlfluff Ignore"), + (FORMAT_HTML, "HTML"), + (FORMAT_INI, "INI config"), + ) + USAGE_CONNECTION_TEMPLATES = "ConnectionTemplate" + USAGE_SQLHOOKS = "SQLHook" + USAGE_PROFILE_FILES = "ProfileFile" + USAGES = ( + (USAGE_CONNECTION_TEMPLATES, "Connection Templates"), + (USAGE_SQLHOOKS, "SQL Hooks"), + (USAGE_PROFILE_FILES, "Profile Files"), + ) + + name = models.CharField(max_length=250) + slug = AutoSlugField( + populate_from=template_slug, + unique=True, + help_text="Automatically generated. If this is a global template, " + "it is the template name with spaces turned into hypens. If this " + "is an account template, the account's slug is appended. See " + "AutoSlugField documentation for details on how the slugs are " + "generated.", + ) + description = models.TextField(null=True, blank=True) + content = models.TextField() + created_by = models.ForeignKey( + settings.AUTH_USER_MODEL, + on_delete=models.SET_NULL, + related_name="created_templates", + blank=True, + null=True, + help_text="If this is null, then it is a system template.", + ) + updated_by = models.ForeignKey( + settings.AUTH_USER_MODEL, + on_delete=models.SET_NULL, + related_name="updated_templates", + blank=True, + null=True, + ) + account = models.ForeignKey( + Account, + on_delete=models.CASCADE, + blank=True, + null=True, + help_text="If null, this is a system template. System templates " + "cannot be modified by users. 
This isn't enforced by this field, " + "though; it is enforced by created_by being null.", + ) + context_type = models.CharField( + max_length=30, + choices=CONTEXT_TYPES, + help_text="Fields per context: user (email, name, username, slug), " + "user credenial (user, ssl_public_key), " + "user credenials (environment, connections), " + "environment (dbt_home_path, type, slug, settings)", + ) + format = models.CharField(max_length=30, choices=FORMATS) + enabled_for = models.JSONField( + default=list, + help_text="List that defines which classes can access this template.", + ) + + def __str__(self): + return self.name + + def clean(self): + """Enforce the integrity of system templates; throws ValidationError + if a user tries to modify a system template.""" + + if self.pk and self.is_system_template and self.updated_by: + raise ValidationError("Templates created by system can not be modified.") + + def save(self, *args, **kwargs): + """Override save to enforce the clean() process above""" + + self.clean() + super().save(*args, **kwargs) + + def render(self, context): + """Receives a context and returns a rendered template text""" + + return render_template(self.content, context) + + def is_enabled_for(self, model_name: str): + """Validate this template is enabled for the given model""" + if model_name not in self.enabled_for: + raise ValidationError(f"Template {self} is not enabled for {model_name}") + + @property + def embedded_comment(self) -> str: + """Generates the embedded comment text which can be added to template + files to indicate it was generated by Data Coves. Note that this will + default to bash-style comments even if FORMAT_NONE is used. + """ + + comment = "# Generated by datacoves\n\n" + format = self.format + if format == Template.FORMAT_HTML: + comment = "\n\n" + elif format == Template.FORMAT_JSON or format == Template.FORMAT_BASH: + comment = "" + return comment + + @property + def is_system_template(self) -> bool: + """True if this is a system template, which is read-only to users""" + return self.created_by is None diff --git a/src/core/api/app/codegen/serializers.py b/src/core/api/app/codegen/serializers.py new file mode 100644 index 00000000..af1cc6fd --- /dev/null +++ b/src/core/api/app/codegen/serializers.py @@ -0,0 +1,9 @@ +from rest_framework import serializers + +from .models import Template + + +class TemplateSerializer(serializers.ModelSerializer): + class Meta: + model = Template + fields = ["id", "name", "description", "context_type", "format"] diff --git a/src/core/api/app/codegen/signals.py b/src/core/api/app/codegen/signals.py new file mode 100644 index 00000000..0003924b --- /dev/null +++ b/src/core/api/app/codegen/signals.py @@ -0,0 +1,59 @@ +from django.db.models import Q +from django.db.models.signals import post_save, pre_save +from django.dispatch import receiver +from projects.models import ConnectionTemplate, UserCredential + +from .models import SQLHook +from .templating import build_user_credential_context + + +@receiver( + pre_save, + sender=UserCredential, + dispatch_uid="codegen.handle_user_credential_pre_save", +) +def handle_user_credential_pre_save(sender, **kwargs): + user_credential = kwargs["instance"] + _process_event(user_credential, SQLHook.TRIGGER_USER_CREDENTIAL_PRE_SAVE) + + +@receiver( + post_save, + sender=UserCredential, + dispatch_uid="codegen.handle_user_credential_post_save", +) +def handle_user_credential_post_save(sender, **kwargs): + user_credential = kwargs["instance"] + _process_event(user_credential, 
SQLHook.TRIGGER_USER_CREDENTIAL_POST_SAVE) + + +def _process_event(user_credential, trigger): + # Do not run sql hooks on user provided usernames to avoid impersonation + if ( + user_credential.connection_template.connection_user + == ConnectionTemplate.CONNECTION_USER_PROVIDED + ): + return + + for hook in ( + SQLHook.objects.filter( + trigger=trigger, + enabled=True, + connection_type=user_credential.connection_template.type, + account=user_credential.environment.project.account, + ) + .filter( + Q(project__isnull=True) | Q(project=user_credential.environment.project) + ) + .filter( + Q(environment__isnull=True) | Q(environment=user_credential.environment) + ) + .filter( + Q(connection_templates=[]) + | Q(connection_templates__contains=user_credential.connection_template_id) + ) + ): + hook.run( + build_user_credential_context(user_credential), + user_credential.combined_connection(), + ) diff --git a/src/core/api/app/codegen/sql_runners.py b/src/core/api/app/codegen/sql_runners.py new file mode 100644 index 00000000..6568517d --- /dev/null +++ b/src/core/api/app/codegen/sql_runners.py @@ -0,0 +1,28 @@ +import shlex +import subprocess + +from projects.exceptions import SQLHookException +from projects.runners import utils + + +def _run_script(cmd_list): + """ + Run a connection test. + """ + try: + subprocess.check_output(cmd_list) + except subprocess.CalledProcessError as e: + stderr = e.output.decode("utf-8") + raise SQLHookException(stderr) + except subprocess.TimeoutExpired: + raise SQLHookException("SQL hook timed out. Please check host") + + +def run_on_sql_runner(connection: dict, script: str, runner: str): + connection_b64 = utils.get_connection_b64(connection) + script_b64 = utils.get_script_b64(script) + cmd_list = shlex.split( + f"/bin/bash -c 'source ${utils.SQL_RUNNERS_VIRTUALENVS[runner]}/bin/activate && python\ + projects/runners/run_on_{runner}.py {connection_b64} {script_b64}'" + ) + return _run_script(cmd_list) diff --git a/src/core/api/app/codegen/templating.py b/src/core/api/app/codegen/templating.py new file mode 100644 index 00000000..ddd5f3ff --- /dev/null +++ b/src/core/api/app/codegen/templating.py @@ -0,0 +1,95 @@ +import jinja2 + + +def escape_quotes(text: str): + return text.replace('"', '\\"').replace("'", "\\'") + + +def render_template(template_content, context): + environment = jinja2.Environment( + # Throw if the template references an undefined variable. 
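To make the rendering contract concrete, a small illustration (not part of this module) of what `render_template` and the `escape_quotes` filter defined above do; the template strings and values are invented.

```python
from codegen.templating import render_template

render_template(
    "GRANT ROLE {{ role }} TO USER {{ user }};",
    {"role": "analyst", "user": "jane"},
)  # -> "GRANT ROLE analyst TO USER jane;"

render_template("{{ password | escape_quotes }}", {"password": "it's"})
# -> the single quote comes back backslash-escaped, safe to embed in a SQL string literal

render_template("{{ missing }}", {})
# raises jinja2.exceptions.UndefinedError because the environment uses StrictUndefined
```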
+ undefined=jinja2.StrictUndefined, + lstrip_blocks=True, + trim_blocks=True, + ) + environment.filters["escape_quotes"] = escape_quotes + template = environment.from_string(template_content) + return template.render(context) + + +# Contexts + + +def build_user_context(user): + """Builds user context""" + return { + "email": user.email, + "name": user.name, + "username": user.email_username, + "slug": user.slug, + } + + +def build_user_credential_context(user_credential): + """Builds user credential context, with a subset of fields as we don't want to expose secrets""" + + conn_data = user_credential.combined_connection() + public_key = _get_ssl_public_key(user_credential) + return { + "user": conn_data.get("user"), + "account": conn_data.get("account"), + "ssl_public_key": public_key, + } + + +def build_user_credentials_context(user, env, creds: list = None): + """creds, if provided, should be the user credentails already pre-loaded + and properly filtered by 'env' in a list.""" + + if creds is None: + user_credentials = ( + user.credentials.select_related("connection_template__type") + .select_related("ssl_key") + .filter(environment=env, validated_at__isnull=False) + .order_by("id") + ) + + else: + user_credentials = [x for x in creds if x.validated_at is not None] + user_credentials.sort(key=lambda x: x.id) + + connections = [] + for uc in user_credentials: + conn_data = uc.combined_connection() + conn_data["name"] = uc.name + conn_data["slug"] = uc.slug + conn_data["type"] = uc.connection_template.type_slug + conn_data["ssl_public_key"] = _get_ssl_public_key(uc) + connections.append(conn_data) + return {"connections": connections, "environment": build_environment_context(env)} + + +def _get_ssl_public_key(user_credential): + """Returns a user credential striped public key (without headers) if not none""" + public_key = user_credential.ssl_key.public if user_credential.ssl_key else "" + public_key = public_key.strip() + if public_key.startswith("--"): # strip -----BEGIN PUBLIC KEY-----, etc. 
+ public_key_lines = public_key.split("\n") + public_key = "".join(public_key_lines[1:-1]) + return public_key + + +def build_environment_context(env): + """Context with environment data useful for templating""" + return { + "dbt_home_path": env.dbt_home_path if env.dbt_home_path else "", + "type": env.type, + "slug": env.slug, + "settings": env.settings, + "release_profile": env.release_profile, + "profile_flags": env.profile_flags, + "dbt_profile": env.dbt_profile, + "protected_branch": ( + env.project.release_branch if env.project.release_branch_protected else "" + ), + } diff --git a/src/core/api/app/codegen/views.py b/src/core/api/app/codegen/views.py new file mode 100644 index 00000000..45a3b0ae --- /dev/null +++ b/src/core/api/app/codegen/views.py @@ -0,0 +1,26 @@ +from core.mixins.views import AddAccountToContextMixin +from django.db.models import Q +from django_filters.rest_framework import DjangoFilterBackend +from iam.permissions import HasResourcePermission +from rest_framework import filters, generics +from rest_framework.permissions import IsAuthenticated + +from .models import Template +from .serializers import TemplateSerializer + + +class TemplateList( + generics.ListCreateAPIView, + AddAccountToContextMixin, +): + filter_backends = [filters.SearchFilter, DjangoFilterBackend] + filterset_fields = ["context_type"] + search_fields = ["name"] + serializer_class = TemplateSerializer + permission_classes = [IsAuthenticated, HasResourcePermission] + + def get_queryset(self): + return Template.objects.filter( + Q(account__slug=self.kwargs.get("account_slug")) | Q(account__isnull=True), + enabled_for__contains=self.request.query_params.get("enabled_for", []), + ).order_by("name") diff --git a/src/core/api/app/core/__init__.py b/src/core/api/app/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/core/apps.py b/src/core/api/app/core/apps.py new file mode 100644 index 00000000..c0ce093b --- /dev/null +++ b/src/core/api/app/core/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class CoreConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "core" diff --git a/src/core/api/app/core/fields.py b/src/core/api/app/core/fields.py new file mode 100644 index 00000000..f3ad26eb --- /dev/null +++ b/src/core/api/app/core/fields.py @@ -0,0 +1,80 @@ +import json + +from cryptography.fernet import Fernet +from django import forms +from django.conf import settings +from django.db import models + + +class EncryptedField(models.BinaryField): + def __init__(self, *args, **kwargs): + kwargs.setdefault("editable", True) + super().__init__(*args, **kwargs) + self._fernet = Fernet(settings.FERNET_KEY) + + def get_prep_value(self, value): + if value is None: + return None + return self._fernet.encrypt(value) + + def from_db_value(self, value, expression, connection): + if value is None: + return None + return self._fernet.decrypt(bytes(value)) + + def value_to_string(self, obj): + return self.value_from_object(obj) + + +class EncryptedTextField(EncryptedField): + def get_prep_value(self, value): + if value is None: + return None + return super().get_prep_value(bytes(value, "utf-8")) + + def to_python(self, value): + return value + + def from_db_value(self, value, expression, connection): + if value is None: + return None + return super().from_db_value(value, expression, connection).decode() + + def formfield(self, **kwargs): + return super().formfield( + **{ + "widget": forms.Textarea, + **kwargs, + } + ) + + +class 
EncryptedJSONField(EncryptedField): + def get_prep_value(self, value): + if value is None: + return None + return super().get_prep_value(bytes(json.dumps(value), "ascii")) + + def from_db_value(self, value, expression, connection): + plaintext = super().from_db_value(value, expression, connection) + if plaintext is None: + return None + try: + return json.loads(plaintext) + except json.JSONDecodeError: + return plaintext + + def to_python(self, value): + if isinstance(value, str): + # FIXME: This is not a good solution + value = value.replace(":false", ":False").replace(":true", ":True") + return eval(value) + return value + + def formfield(self, **kwargs): + return super().formfield( + **{ + "form_class": forms.JSONField, + **kwargs, + } + ) diff --git a/src/core/api/app/core/management/__init__.py b/src/core/api/app/core/management/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/core/management/commands/runcelery.py b/src/core/api/app/core/management/commands/runcelery.py new file mode 100644 index 00000000..953f2d6b --- /dev/null +++ b/src/core/api/app/core/management/commands/runcelery.py @@ -0,0 +1,32 @@ +import os +import subprocess + +from django.core.management.base import BaseCommand +from django.utils import autoreload + + +def run_celery(queue): + subprocess.call("pkill -9 -f datacoves".split()) + os.environ["WORKER"] = "1" + # FIXME: Added --without-mingle --without-gossip bec of an issue on celery/redis + # https://github.com/celery/celery/discussions/7276 + subprocess.call( + [ + "su", + "abc", + "-c", + f"celery -A datacoves worker -l INFO -E -Q {queue} --without-mingle --without-gossip", + ] + ) + + +class Command(BaseCommand): + help = "Run Celery workers, restarting them on code changes." + + def add_arguments(self, parser): + parser.add_argument("queue", type=str, default="api-main") + + def handle(self, *args, **options): + queue = options["queue"] + self.stdout.write(f"Starting celery worker with autoreload (queue {queue})...") + autoreload.run_with_reloader(run_celery, queue) diff --git a/src/core/api/app/core/management/commands/wait_for_db.py b/src/core/api/app/core/management/commands/wait_for_db.py new file mode 100644 index 00000000..6a8b8788 --- /dev/null +++ b/src/core/api/app/core/management/commands/wait_for_db.py @@ -0,0 +1,22 @@ +import time + +from django.core.management.base import BaseCommand +from django.db.utils import OperationalError +from psycopg2 import OperationalError as Psycopg2Error + + +class Command(BaseCommand): + """Django command to wait for database.""" + + def handle(self, *args, **options): + self.stdout.write("Waiting for database...") + db_up = False + while db_up is False: + try: + self.check(databases=["default"]) + db_up = True + except (Psycopg2Error, OperationalError): + self.stdout.write("Database unavailable, waiting 1 second...") + time.sleep(1) + + self.stdout.write(self.style.SUCCESS("Database available!")) diff --git a/src/core/api/app/core/management/commands/wait_for_model.py b/src/core/api/app/core/management/commands/wait_for_model.py new file mode 100644 index 00000000..4cbf7931 --- /dev/null +++ b/src/core/api/app/core/management/commands/wait_for_model.py @@ -0,0 +1,51 @@ +import time + +from django.apps import apps +from django.core.management.base import BaseCommand + + +class Command(BaseCommand): + """Django command to wait for a model.""" + + def add_arguments(self, parser): + parser.add_argument( + "--app", + help="Django app.", + default="clusters", + ) + + 
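As a standalone sketch of the round-trip the encrypted fields above perform (using a throwaway key here instead of `settings.FERNET_KEY`; the payload is made up):

```python
import json

from cryptography.fernet import Fernet

fernet = Fernet(Fernet.generate_key())  # stand-in for settings.FERNET_KEY

value = {"user": "svc_datacoves", "role": "TRANSFORMER"}
stored = fernet.encrypt(bytes(json.dumps(value), "ascii"))  # what get_prep_value persists
restored = json.loads(fernet.decrypt(stored))               # what from_db_value hands back
assert restored == value
```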
parser.add_argument( + "--model", + help="Django model.", + default="Cluster", + ) + + parser.add_argument( + "--has-records", + help="Has records.", + default="false", + ) + + def handle(self, *args, **options): + app = options["app"] + model = options["model"] + has_records = options["has_records"].lower() == "true" + + self.stdout.write(f"Waiting for {app}.{model} Django model") + model_exists = False + while model_exists is False: + try: + Model = apps.get_model(app_label=app, model_name=model) + if has_records: + model_exists = Model.objects.exists() + time.sleep(1) + else: + model_exists = True + + except Exception: + self.stdout.write( + f"{app}.{model} Django model unavailable, waiting 1 second..." + ) + time.sleep(1) + + self.stdout.write(self.style.SUCCESS(f"{app}.{model} Django model available!")) diff --git a/src/core/api/app/core/management/commands/wait_for_redis.py b/src/core/api/app/core/management/commands/wait_for_redis.py new file mode 100644 index 00000000..745b8c1f --- /dev/null +++ b/src/core/api/app/core/management/commands/wait_for_redis.py @@ -0,0 +1,22 @@ +import time + +from django.core.cache import cache +from django.core.management.base import BaseCommand + + +class Command(BaseCommand): + """Django command to wait for redis.""" + + def handle(self, *args, **options): + self.stdout.write("Waiting for Redis...") + redis_up = False + while redis_up is False: + try: + # celery -A datacoves status + cache.set("datacoves_redis_healtcheck", "ok") + redis_up = True + except Exception: + self.stdout.write("Redis unavailable, waiting 1 second...") + time.sleep(1) + + self.stdout.write(self.style.SUCCESS("Redis available!")) diff --git a/src/core/api/app/core/middleware.py b/src/core/api/app/core/middleware.py new file mode 100644 index 00000000..7792ac1b --- /dev/null +++ b/src/core/api/app/core/middleware.py @@ -0,0 +1,21 @@ +from django.conf import settings +from django.http import HttpResponse +from django.template.loader import get_template +from social_core.exceptions import AuthException + + +class AuthErrorHandlerMiddleware: + def __init__(self, get_response): + self.get_response = get_response + + def __call__(self, request): + response = self.get_response(request) + return response + + def process_exception(self, request, exception): + if not settings.DEBUG and isinstance(exception, AuthException): + template = get_template("auth-exception.html") + return HttpResponse( + template.render(context={"logout_url": settings.LOGOUT_REDIRECT_URL}), + status=500, + ) diff --git a/src/core/api/app/core/migrations/__init__.py b/src/core/api/app/core/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/core/mixins/__init__.py b/src/core/api/app/core/mixins/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/core/mixins/models.py b/src/core/api/app/core/mixins/models.py new file mode 100644 index 00000000..c0aa1fc2 --- /dev/null +++ b/src/core/api/app/core/mixins/models.py @@ -0,0 +1,71 @@ +import uuid + +from django.db import models + + +class EidModelMixin(models.Model): + eid = models.UUIDField(default=uuid.uuid4, unique=True, editable=False) + + class Meta: + abstract = True + + +class AuditModelMixin(models.Model): + created_at = models.DateTimeField(auto_now_add=True, editable=False) + updated_at = models.DateTimeField(auto_now=True, editable=False) + + class Meta: + abstract = True + + +class LogEntryMixin(models.Model): + at = models.DateTimeField(auto_now_add=True, editable=False) + 
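Since these wait_for_* helpers are ordinary management commands, a startup or init script could chain them; a hypothetical sequence (the model choice is illustrative only):

```python
from django.core.management import call_command

call_command("wait_for_db")     # loops until the default database accepts connections
call_command("wait_for_redis")  # loops until the cache backend accepts writes
# Block until at least one projects.Project row exists (illustrative model choice).
call_command("wait_for_model", app="projects", model="Project", has_records="true")
```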
topic = models.SlugField(default="", editable=False) + data = models.JSONField(default=dict, editable=False) + + class Meta: + abstract = True + indexes = [ + models.Index(fields=["-at"]), + models.Index(fields=["topic", "-at"]), + ] + + +# FIXME: Commented out since it is not being used and requires a fixing the scenario when `from_db` +# is not called from a post_save signal, and _loaded_values is None. + + +# class DiffMixin(models.Model): +# """ +# DiffMixin saves the original values loaded from the database for the fields +# specified in the model's meta diff_fields attribute. This enables detecting +# changes to these fields. The diff method is provided for this purpose. +# """ + +# class Meta: +# abstract = True + +# @classmethod +# def from_db(cls, db, field_names, values): +# instance = super().from_db(db, field_names, values) +# field_values = (value for value in values if value is not models.DEFERRED) +# instance._loaded_values = { +# field_name: deepcopy(field_value) +# for field_name, field_value in zip(field_names, field_values) +# if field_name in cls.DIFF_FIELDS +# } +# return instance + +# def diff(self): +# """ +# Returns a dictionary where the keys are the names of the diff fields +# that have changed since load time. The values are tuples of the form +# (loaded_value, current_value) +# """ +# delta = {} +# for field_name in self.DIFF_FIELDS: +# loaded_value = self._loaded_values.get(field_name) +# current_value = getattr(self, field_name) +# if loaded_value != current_value: +# delta[field_name] = (loaded_value, current_value) +# return delta diff --git a/src/core/api/app/core/mixins/views.py b/src/core/api/app/core/mixins/views.py new file mode 100644 index 00000000..d66e8799 --- /dev/null +++ b/src/core/api/app/core/mixins/views.py @@ -0,0 +1,101 @@ +from django.core.exceptions import ValidationError +from django.db import IntegrityError +from projects.exceptions import HookException +from rest_framework import status +from rest_framework.response import Response + + +class AddAccountToContextMixin: + def get_serializer_context(self): + context = super().get_serializer_context() + context.update({"account": self.kwargs.get("account_slug")}) + return context + + +class AddProjectEnvToContextMixin: + def get_serializer_context(self): + context = super().get_serializer_context() + context.update({"environment": self.kwargs.get("environment_slug")}) + return context + + +class BaseVerboseMixin: + def get_integrity_exception_message(self, ex, data): + return str(ex) + + def get_validation_exception_message(self, ex, data): + return str(ex) + + def get_hook_exception_message(self, ex, data): + return f"Error running hook: {ex} ({ex.__class__.__name__})".replace('"', "'") + + +class VerboseCreateModelMixin(BaseVerboseMixin): + """ + Create a model instance and return either created object or the validation errors. 
+ """ + + def create(self, request, *args, **kwargs): + serializer = self.get_serializer(data=request.data) + if serializer.is_valid(): + try: + self.perform_create(serializer) + except HookException as ex: + return Response( + self.get_hook_exception_message(ex, request.data), + status=status.HTTP_400_BAD_REQUEST, + ) + except IntegrityError as ex: + return Response( + self.get_integrity_exception_message(ex, request.data), + status=status.HTTP_400_BAD_REQUEST, + ) + except ValidationError as ex: + return Response( + self.get_validation_exception_message(ex, request.data), + status=status.HTTP_400_BAD_REQUEST, + ) + else: + headers = self.get_success_headers(serializer.data) + return Response( + serializer.data, status=status.HTTP_201_CREATED, headers=headers + ) + else: + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) + + +class VerboseUpdateModelMixin(BaseVerboseMixin): + """ + Update a model instance and return either updated object or the validation errors + """ + + def update(self, request, *args, **kwargs): + partial = kwargs.pop("partial", False) + instance = self.get_object() + serializer = self.get_serializer(instance, data=request.data, partial=partial) + if serializer.is_valid(): + try: + self.perform_update(serializer) + except HookException as ex: + return Response( + self.get_hook_exception_message(ex, request.data), + status=status.HTTP_400_BAD_REQUEST, + ) + except IntegrityError as ex: + return Response( + self.get_integrity_exception_message(ex, request.data), + status=status.HTTP_400_BAD_REQUEST, + ) + except ValidationError as ex: + return Response( + self.get_validation_exception_message(ex, request.data), + status=status.HTTP_400_BAD_REQUEST, + ) + else: + if getattr(instance, "_prefetched_objects_cache", None): + # If 'prefetch_related' has been applied to a queryset, we need to + # forcibly invalidate the prefetch cache on the instance. + instance._prefetched_objects_cache = {} + return Response(serializer.data) + else: + return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) diff --git a/src/core/api/app/core/models.py b/src/core/api/app/core/models.py new file mode 100644 index 00000000..fcdbf6c1 --- /dev/null +++ b/src/core/api/app/core/models.py @@ -0,0 +1,28 @@ +"""Base class for all DataCoves models""" + +from django.db import models + + +class DatacovesModel(models.Model): + """All models in DataCoves should extend this as a base. Anything + that should be common to all models can be put in here. Please use + this sparingly and carefully.""" + + class Meta: + abstract = True + + def is_relation_cached(self, relation_name: str) -> bool: + """This checks to see if 'relation_name' is loaded in our model's + cache. This is useful for optimizations where we could loop over + cache instead of doing a SQL query in certain cases.""" + + # This can show up in potentially two places. _state is for one + # to one or one to many relations, _prefetched_objects_cache is + # for many-to-many or reverse relations. + # + # The model object will not have _prefetched_objects_cache by + # default so always check for the field's existance first. 
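A hypothetical caller-side illustration of the cache check described above; the query itself is invented, but `project.account` is a ForeignKey relation referenced elsewhere in this diff.

```python
from projects.models import Project

cached = Project.objects.select_related("account").first()
cached.is_relation_cached("account")    # True -- the FK landed in _state.fields_cache

uncached = Project.objects.first()
uncached.is_relation_cached("account")  # False -- reading .account would hit the database
```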
+ return relation_name in self._state.fields_cache or ( + hasattr(self, "_prefetched_objects_cache") + and relation_name in self._prefetched_objects_cache + ) diff --git a/src/core/api/app/core/serializers.py b/src/core/api/app/core/serializers.py new file mode 100644 index 00000000..1ab23d89 --- /dev/null +++ b/src/core/api/app/core/serializers.py @@ -0,0 +1,47 @@ +import re + +from rest_framework import serializers + +ENCODED_VALUE = "********" + + +class EncodedValueField(serializers.JSONField): + def __init__(self, *args, **kwargs): + self.encoded_value = kwargs.pop("encoded_value", ENCODED_VALUE) + # it could be i.e. "secret|password" + self.attr_filter = kwargs.pop("attr_filter", r".*") + super().__init__(*args, **kwargs) + + def get_encoded_values(self, encoded, values): + if values is None: + return encoded + + for attr, value in values.items(): + if isinstance(value, (float, int, str, bool)): + encoded[attr] = ( + ENCODED_VALUE if re.match(self.attr_filter, attr) else value + ) + else: + encoded[attr] = {} + self.get_encoded_values(encoded[attr], value) + + return encoded + + def to_representation(self, value): + value = super().to_representation(value) + encoded = self.get_encoded_values({}, value) + return encoded + + @classmethod + def decode_values(cls, instance, values, decoded): + """This function should be called on serializer update to avoid overriding the encoded values""" + for attr, value in values.items(): + if isinstance(value, (float, int, str, bool)): + if value == ENCODED_VALUE and instance.get(attr): + decoded[attr] = instance[attr] + else: + decoded[attr] = value + else: + decoded[attr] = {} + cls.decode_values(instance.get(attr, {}), value, decoded[attr]) + return decoded diff --git a/src/core/api/app/core/static/admin/img/favicon.ico b/src/core/api/app/core/static/admin/img/favicon.ico new file mode 100644 index 00000000..12ff98b8 Binary files /dev/null and b/src/core/api/app/core/static/admin/img/favicon.ico differ diff --git a/src/core/api/app/core/static/admin/img/logo_navyblue.svg b/src/core/api/app/core/static/admin/img/logo_navyblue.svg new file mode 100644 index 00000000..e7fd17d4 --- /dev/null +++ b/src/core/api/app/core/static/admin/img/logo_navyblue.svg @@ -0,0 +1,4 @@ + + + + diff --git a/src/core/api/app/core/static/admin/js/shortcuts.js b/src/core/api/app/core/static/admin/js/shortcuts.js new file mode 100644 index 00000000..aeb8864b --- /dev/null +++ b/src/core/api/app/core/static/admin/js/shortcuts.js @@ -0,0 +1,82 @@ +document.addEventListener('DOMContentLoaded', () => { + const links = JSON.parse(document.getElementById('shortcuts-links').textContent); + let visible = false; + const back = document.createElement('div'); + back.setAttribute('id', 'shortcuts'); + back.setAttribute('tabindex', ''); + const select = document.createElement('select'); + const empty = document.createElement('option'); + select.appendChild(empty); + links.forEach(({name, url}) => { + const option = document.createElement('option'); + option.setAttribute('value', url); + option.innerText = name; + select.appendChild(option); + }); + + back.appendChild(select); + document.querySelector('body').appendChild(back); + + const show = () => { + visible = true; + back.style.display = 'block'; + $(select).val(null).trigger('change').select2('open'); + }; + + const hide = () => { + visible = false; + $(select).select2('close'); + back.style.display = 'none'; + }; + + const toggle = () => { + visible ? 
hide() : show(); + }; + + const backKeyDown = (event) => { + if (event.keyCode === 27) { + hide(); + } + }; + + const backClick = (event) => { + hide(); + }; + + const documentKeyDown = (event) => { + if (event.keyCode === 75 && (event.ctrlKey || event.metaKey)) { + toggle(); + event.preventDefault(); + } + }; + + const selectSelect = (event) => { + hide(); + window.location.href = event.params.data.id; + }; + + const selectClose = (event) => { + hide(); + }; + + const selectOpen = (event) => { + document.querySelector('.select2-container--open .select2-search__field').focus() + }; + + back.addEventListener('keydown', backKeyDown); + back.addEventListener('click', backClick); + document.addEventListener('keydown', documentKeyDown); + $(select).on('select2:select', selectSelect); + $(select).on('select2:close', selectClose); + $(select).on('select2:open', selectOpen); + + $(document).ready(function() { + $(select).select2({ + allowClear: true, + placeholder: 'Shortcuts...', + }); + }); + + window.toggleShortcuts = toggle; + }, false); + \ No newline at end of file diff --git a/src/core/api/app/core/templates/admin/base_site.html b/src/core/api/app/core/templates/admin/base_site.html new file mode 100644 index 00000000..c267fb4f --- /dev/null +++ b/src/core/api/app/core/templates/admin/base_site.html @@ -0,0 +1,229 @@ +{% extends 'admin/base.html' %} + +{% load panel static %} + +{% block page-tools %} + +{% endblock %} + +{% block extrastyle %} + {{ block.super }} + +{% endblock %} + +{% block stylesheets %} + {{ block.super }} + + +{% endblock %} + + +{% block javascripts %} + {{ block.super }} + + + {% shortcuts as shortcuts_links %} + {{ shortcuts_links|json_script:'shortcuts-links'}} + +{% endblock %} + +{% block userlinks %} + +{% endblock %} diff --git a/src/core/api/app/core/templates/admin/combined_login.html b/src/core/api/app/core/templates/admin/combined_login.html new file mode 100644 index 00000000..e490ca13 --- /dev/null +++ b/src/core/api/app/core/templates/admin/combined_login.html @@ -0,0 +1,84 @@ +{% extends "admin/base_site.html" %} +{% load i18n static %} + +{% block extrastyle %} +{{ block.super }} + + +{{ form.media }} +{% endblock %} + +{% block bodyclass %}{{ block.super }} login{% endblock %} + +{% block usertools %}{% endblock %} + +{% block nav-global %}{% endblock %} + +{% block nav-sidebar %}{% endblock %} + +{% block content_title %}{% endblock %} + +{% block breadcrumbs %}{% endblock %} + +{% block content %} +{% if form.errors and not form.non_field_errors %} +

+ {% if form.errors.items|length == 1 %}{% translate "Please correct the error below." %}{% else %}{% translate + "Please correct the errors below." %}{% endif %} +

+{% endif %} + +{% if form.non_field_errors %} +{% for error in form.non_field_errors %} +

+ {{ error }} +

+{% endfor %} +{% endif %} + +
+ + + + {% if user.is_authenticated %} +

+ {% blocktranslate trimmed %} + You are authenticated as {{ username }}, but are not authorized to + access the admin panel. Request admin privileges to another admin user. + {% endblocktranslate %} +

+ {% else %} +
{% csrf_token %} +
+ {{ form.username.errors }} + {{ form.username.label_tag }} {{ form.username }} +
+
+ {{ form.password.errors }} + {{ form.password.label_tag }} {{ form.password }} + +
+ {% url 'admin_password_reset' as password_reset_url %} + {% if password_reset_url %} + + {% endif %} +
+ +
+
+ {% translate 'SSO Login' %} + {% endif %} +
+{% endblock %} diff --git a/src/core/api/app/core/templates/admin/sso_login.html b/src/core/api/app/core/templates/admin/sso_login.html new file mode 100644 index 00000000..c3c70b5d --- /dev/null +++ b/src/core/api/app/core/templates/admin/sso_login.html @@ -0,0 +1,64 @@ +{% extends "admin/base_site.html" %} +{% load i18n static %} + +{% block extrastyle %} +{{ block.super }} + + +{{ form.media }} +{% endblock %} + +{% block bodyclass %}{{ block.super }} login{% endblock %} + +{% block usertools %}{% endblock %} + +{% block nav-global %}{% endblock %} + +{% block nav-sidebar %}{% endblock %} + +{% block content_title %}{% endblock %} + +{% block breadcrumbs %}{% endblock %} + +{% block content %} +{% if form.errors and not form.non_field_errors %} +

+ {% if form.errors.items|length == 1 %}{% translate "Please correct the error below." %}{% else %}{% translate + "Please correct the errors below." %}{% endif %} +

+{% endif %} + +{% if form.non_field_errors %} +{% for error in form.non_field_errors %} +

+ {{ error }} +

+{% endfor %} +{% endif %} + +
+ + + + {% if user.is_authenticated %} +

+ {% blocktranslate trimmed %} + You are authenticated as {{ username }}, but are not authorized to + access the admin panel. Request admin privileges to another admin user. + {% endblocktranslate %} +

+ {% else %} + {% translate 'Login to Admin' %} + {% endif %} +
+{% endblock %} diff --git a/src/core/api/app/core/templates/admin_doc/model_index.html b/src/core/api/app/core/templates/admin_doc/model_index.html new file mode 100644 index 00000000..34d3efdb --- /dev/null +++ b/src/core/api/app/core/templates/admin_doc/model_index.html @@ -0,0 +1,60 @@ +{% extends "admin/base_site.html" %} +{% load i18n %} +{% block javascripts %} + {{ block.super }} + +{% endblock %} + +{% block bodyclass %}grp-docutils grp-model-index{% endblock %} +{% block content-class %}{% endblock %} +{% block breadcrumbs %} + +{% endblock %} +{% block title %}Models{% endblock %} + +{% block content %} +

Model documentation

+ {% regroup models|dictsort:"app_label" by app_label as grouped_models %} +
+
+
+

Model groups

+
    + {% regroup models|dictsort:"app_label" by app_label as grouped_models %} + {% for group in grouped_models %} +
  • {{ group.grouper|capfirst }}
  • + {% endfor %} +
+
+
+
+ {% for group in grouped_models %} +
+

{{ group.grouper|capfirst }}

+ + {% for model in group.list %} + + + + {% endfor %} +
{{ model.object_name }}
+
+ {% endfor %} +
+
+{% endblock %} diff --git a/src/core/api/app/core/templates/auth-exception.html b/src/core/api/app/core/templates/auth-exception.html new file mode 100644 index 00000000..0f688bdb --- /dev/null +++ b/src/core/api/app/core/templates/auth-exception.html @@ -0,0 +1,12 @@ + + +
+

Unhandled Authentication Error

+

The Authentication Workflow could not be completed.

+

+ Please try signing in again here. +

+

If the problem persists, contact Datacoves Support support@datacoves.com.

+
+ + diff --git a/src/core/api/app/core/templatetags/__init__.py b/src/core/api/app/core/templatetags/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/core/templatetags/panel.py b/src/core/api/app/core/templatetags/panel.py new file mode 100644 index 00000000..1292e30c --- /dev/null +++ b/src/core/api/app/core/templatetags/panel.py @@ -0,0 +1,45 @@ +from clusters.models import Cluster +from django import template +from django.urls import reverse +from grappelli.dashboard.utils import filter_models + +register = template.Library() + + +@register.simple_tag(takes_context=True) +def shortcuts(context): + request = context["request"] + + amap = {"view": "changelist"} + nmap = {"view": "{app} / {mp}"} + admin_links = [] + for model, views in filter_models(request, ["*"], []): + for view, active in views.items(): + if active and view in amap: + admin_links.append( + { + "name": nmap[view].format( + mp=model._meta.verbose_name_plural.title(), + ms=model._meta.verbose_name.title(), + app=model._meta.app_label.replace("_", " / ").title(), + ), + "url": reverse( + f"admin:{model._meta.app_label}_{model.__name__.lower()}_{amap[view]}" + ), + } + ) + admin_links.sort(key=lambda o: o["name"]) + + return admin_links + + +@register.simple_tag +def cluster_domain(): + cluster = Cluster.objects.current().first() + return cluster.domain if cluster else "" + + +@register.simple_tag +def cluster_admin_color(): + cluster = Cluster.objects.current().first() + return cluster.settings.get("admin_panel_color") if cluster else "black" diff --git a/src/core/api/app/core/tests/__init__.py b/src/core/api/app/core/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/core/tests/test_mixins.py b/src/core/api/app/core/tests/test_mixins.py new file mode 100644 index 00000000..c8c6d903 --- /dev/null +++ b/src/core/api/app/core/tests/test_mixins.py @@ -0,0 +1,277 @@ +from core.mixins.views import VerboseCreateModelMixin, VerboseUpdateModelMixin +from django.core.exceptions import ValidationError +from django.db import IntegrityError +from django.test import TestCase +from projects.exceptions import HookException +from rest_framework import status +from rest_framework.response import Response + + +class VerboseCreateModelMixinTest(TestCase): + def tests_return_standard_response(self): + """ + Test that a standard response is returned when the object is created. + """ + + class TestView(VerboseCreateModelMixin): + def perform_create(self, serializer): + pass + + def get_serializer(self, *args, **kwargs): + class TestSerializer: + def is_valid(self): + return True + + @property + def data(self): + return {"test": "test"} + + return TestSerializer() + + def get_success_headers(self, data): + return {"test": "test"} + + class FakeRequest: + def __init__(self, data): + self.data = data + + view = TestView() + request = FakeRequest({"test": "test"}) + response = view.create(request) + self.assertIsInstance(response, Response) + self.assertEqual(response.status_code, status.HTTP_201_CREATED) + self.assertEqual(response.data, {"test": "test"}) + + def tests_returns_response_on_hook_exception(self): + """ + Test that a standard response is returned when a hook exception occurs. 
+ """ + + class TestView(VerboseCreateModelMixin): + def perform_create(self, serializer): + raise HookException("Test") + + def get_serializer(self, *args, **kwargs): + class TestSerializer: + def is_valid(self): + return True + + @property + def data(self): + return {"test": "test"} + + return TestSerializer() + + class FakeRequest: + def __init__(self, data): + self.data = data + + view = TestView() + request = FakeRequest({"test": "test"}) + response = view.create(request) + self.assertIsInstance(response, Response) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertEqual(response.data, "Error running hook: Test (HookException)") + + def tests_returns_response_on_integrity_error(self): + """ + Test that a standard response is returned when a integrity error occurs. + """ + + class TestView(VerboseCreateModelMixin): + def perform_create(self, serializer): + raise IntegrityError("Test") + + def get_serializer(self, *args, **kwargs): + class TestSerializer: + def is_valid(self): + return True + + @property + def data(self): + return {"test": "test"} + + return TestSerializer() + + class FakeRequest: + def __init__(self, data): + self.data = data + + view = TestView() + request = FakeRequest({"test": "test"}) + response = view.create(request) + self.assertIsInstance(response, Response) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertEqual(response.data, "Test") + + def tests_returns_response_on_validation_error(self): + """ + Test that a standard response is returned when a validation error occurs. + """ + + class TestView(VerboseCreateModelMixin): + def perform_create(self, serializer): + raise ValidationError("Test") + + def get_serializer(self, *args, **kwargs): + class TestSerializer: + def is_valid(self): + return True + + @property + def data(self): + return {"test": "test"} + + return TestSerializer() + + class FakeRequest: + def __init__(self, data): + self.data = data + + view = TestView() + request = FakeRequest({"test": "test"}) + response = view.create(request) + self.assertIsInstance(response, Response) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertEqual(response.data, "['Test']") + + +class VerboseUpdateModelMixinTest(TestCase): + def tests_return_standard_response(self): + """ + Test that a standard response is returned when the object is updated. + """ + + class TestView(VerboseUpdateModelMixin): + def perform_update(self, serializer): + pass + + def get_serializer(self, *args, **kwargs): + class TestSerializer: + def is_valid(self): + return True + + @property + def data(self): + return {"test": "test"} + + return TestSerializer() + + def get_success_headers(self, data): + return {"test": "test"} + + def get_object(self): + return {"test": "test"} + + class FakeRequest: + def __init__(self, data): + self.data = data + + view = TestView() + request = FakeRequest({"test": "test"}) + response = view.update(request) + self.assertIsInstance(response, Response) + self.assertEqual(response.status_code, status.HTTP_200_OK) + self.assertEqual(response.data, {"test": "test"}) + + def tests_returns_response_on_hook_exception(self): + """ + Test that a standard response is returned when a hook exception occurs. 
+ """ + + class TestView(VerboseUpdateModelMixin): + def perform_update(self, serializer): + raise HookException("Test") + + def get_serializer(self, *args, **kwargs): + class TestSerializer: + def is_valid(self): + return True + + @property + def data(self): + return {"test": "test"} + + return TestSerializer() + + def get_object(self): + return {"test": "test"} + + class FakeRequest: + def __init__(self, data): + self.data = data + + view = TestView() + request = FakeRequest({"test": "test"}) + response = view.update(request) + self.assertIsInstance(response, Response) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertEqual(response.data, "Error running hook: Test (HookException)") + + def tests_returns_response_on_integrity_error(self): + """ + Test that a standard response is returned when a integrity error occurs. + """ + + class TestView(VerboseUpdateModelMixin): + def perform_update(self, serializer): + raise IntegrityError("Test") + + def get_serializer(self, *args, **kwargs): + class TestSerializer: + def is_valid(self): + return True + + @property + def data(self): + return {"test": "test"} + + return TestSerializer() + + def get_object(self): + return {"test": "test"} + + class FakeRequest: + def __init__(self, data): + self.data = data + + view = TestView() + request = FakeRequest({"test": "test"}) + response = view.update(request) + self.assertIsInstance(response, Response) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertEqual(response.data, "Test") + + def tests_returns_response_on_validation_error(self): + """ + Test that a standard response is returned when a validation error occurs. + """ + + class TestView(VerboseUpdateModelMixin): + def perform_update(self, serializer): + raise ValidationError("Test") + + def get_serializer(self, *args, **kwargs): + class TestSerializer: + def is_valid(self): + return True + + @property + def data(self): + return {"test": "test"} + + return TestSerializer() + + def get_object(self): + return {"test": "test"} + + class FakeRequest: + def __init__(self, data): + self.data = data + + view = TestView() + request = FakeRequest({"test": "test"}) + response = view.update(request) + self.assertIsInstance(response, Response) + self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST) + self.assertEqual(response.data, "['Test']") diff --git a/src/core/api/app/credentials/__init__.py b/src/core/api/app/credentials/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/credentials/admin.py b/src/core/api/app/credentials/admin.py new file mode 100644 index 00000000..f795ad46 --- /dev/null +++ b/src/core/api/app/credentials/admin.py @@ -0,0 +1,43 @@ +from core.fields import EncryptedJSONField +from django.contrib import admin, messages +from django.db import transaction +from django.forms import ValidationError +from django_json_widget.widgets import JSONEditorWidget + +from datacoves.admin import BaseModelAdmin + +from .models import Secret + + +@admin.action(description="Archive selected secrets") +def archive_secrets(modeladmin, request, queryset): + try: + with transaction.atomic(): + for secret in queryset.order_by("id").all(): + secret.archive(request.user) + except ValidationError as e: + modeladmin.message_user(request, e.message, level=messages.ERROR) + + +@admin.register(Secret) +class SecretAdmin(BaseModelAdmin, admin.ModelAdmin): + formfield_overrides = { + EncryptedJSONField: {"widget": JSONEditorWidget}, + } + list_display = ( + 
"account", + "project", + "environment", + "slug", + "users", + "services", + "created_at", + "archived_at", + ) + + def account(self, obj): + return obj.project.account + + list_filter = ("tags", "project__account", "project", "environment") + search_fields = ("project__name", "slug") + actions = [archive_secrets] diff --git a/src/core/api/app/credentials/apps.py b/src/core/api/app/credentials/apps.py new file mode 100644 index 00000000..689e5ef5 --- /dev/null +++ b/src/core/api/app/credentials/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class CredentialsConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "credentials" diff --git a/src/core/api/app/credentials/backends/__init__.py b/src/core/api/app/credentials/backends/__init__.py new file mode 100644 index 00000000..aa565447 --- /dev/null +++ b/src/core/api/app/credentials/backends/__init__.py @@ -0,0 +1,23 @@ +class SecretsBackend: + def __init__(self, project): + self.project = project + + def get(cls, secret) -> dict: + raise NotImplementedError() + + def create(cls, secret, value): + raise NotImplementedError() + + def update(cls, secret, value): + raise NotImplementedError() + + def delete(cls, secret): + raise NotImplementedError() + + +class SecretNotFoundException(Exception): + pass + + +class SecretAlreadyExistsException(Exception): + pass diff --git a/src/core/api/app/credentials/backends/all.py b/src/core/api/app/credentials/backends/all.py new file mode 100644 index 00000000..d3f33cb3 --- /dev/null +++ b/src/core/api/app/credentials/backends/all.py @@ -0,0 +1,5 @@ +from .aws import AWSSecretsBackend + +BACKENDS = { + "aws_secrets_manager": AWSSecretsBackend, +} diff --git a/src/core/api/app/credentials/backends/aws.py b/src/core/api/app/credentials/backends/aws.py new file mode 100644 index 00000000..2b7180e5 --- /dev/null +++ b/src/core/api/app/credentials/backends/aws.py @@ -0,0 +1,76 @@ +import json + +import boto3 +import botocore + +from . 
import SecretAlreadyExistsException, SecretNotFoundException, SecretsBackend + + +class AWSSecretsBackend(SecretsBackend): + def _get_client(self): + return boto3.client( + "secretsmanager", + region_name=self.project.secrets_backend_config.get("region_id"), + aws_access_key_id=self.project.secrets_backend_config.get("access_key"), + aws_secret_access_key=self.project.secrets_backend_config.get( + "access_secret_key" + ), + ) + + def _get_secret_id(self, secret) -> str: + return f"datacoves/{self.project.slug}/{secret.id}" + + def get(self, secret) -> dict: + client = self._get_client() + try: + secret = client.get_secret_value(SecretId=self._get_secret_id(secret)) + except botocore.exceptions.ClientError as err: + if ( + err.response["Error"]["Code"] == "ResourceNotFoundException" + or err.response["Error"]["Code"] == "InvalidRequestException" + ): + raise SecretNotFoundException() + else: + raise + return json.loads(secret["SecretString"]) + + def create(self, secret, value): + client = self._get_client() + try: + client.create_secret( + Name=self._get_secret_id(secret), + Description=secret.slug, + SecretString=json.dumps(value), + ) + except botocore.exceptions.ClientError as err: + if err.response["Error"]["Code"] == "ResourceExistsException": + raise SecretAlreadyExistsException() + else: + raise + + def update(self, secret, value): + client = self._get_client() + try: + client.update_secret( + SecretId=self._get_secret_id(secret), + Description=secret.slug, + SecretString=json.dumps(value), + ) + except botocore.exceptions.ClientError as err: + if ( + err.response["Error"]["Code"] == "ResourceNotFoundException" + or err.response["Error"]["Code"] == "InvalidRequestException" + ): + raise SecretNotFoundException() + else: + raise + + def delete(self, secret): + client = self._get_client() + try: + client.delete_secret(SecretId=self._get_secret_id(secret)) + except botocore.exceptions.ClientError as err: + if err.response["Error"]["Code"] == "ResourceNotFoundException": + raise SecretNotFoundException() + else: + raise diff --git a/src/core/api/app/credentials/migrations/0001_initial.py b/src/core/api/app/credentials/migrations/0001_initial.py new file mode 100644 index 00000000..1fd13ff5 --- /dev/null +++ b/src/core/api/app/credentials/migrations/0001_initial.py @@ -0,0 +1,41 @@ +# Generated by Django 3.2.16 on 2023-01-12 19:38 + +import autoslug.fields +import core.fields +import django.db.models.deletion +import taggit.managers +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('taggit', '0005_auto_20220424_2025'), + ('projects', '0071_auto_20221228_1642'), + ] + + operations = [ + migrations.CreateModel( + name='Secret', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('name', models.CharField(max_length=100)), + ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True)), + ('value', core.fields.EncryptedJSONField(default=dict, editable=True)), + ('archived_at', models.DateTimeField(blank=True, editable=False, null=True)), + ('archived_by', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='archived_secrets', 
to=settings.AUTH_USER_MODEL)), + ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='created_secrets', to=settings.AUTH_USER_MODEL)), + ('project', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='secrets', to='projects.project')), + ('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags')), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/src/core/api/app/credentials/migrations/0002_auto_20230303_1844.py b/src/core/api/app/credentials/migrations/0002_auto_20230303_1844.py new file mode 100644 index 00000000..268a18a3 --- /dev/null +++ b/src/core/api/app/credentials/migrations/0002_auto_20230303_1844.py @@ -0,0 +1,49 @@ +# Generated by Django 3.2.16 on 2023-03-03 18:44 + +import autoslug.fields +from django.db import migrations, models +import django.db.models.deletion +import taggit.managers + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0076_update_account_permissions"), + ("taggit", "0005_auto_20220424_2025"), + ("credentials", "0001_initial"), + ] + + operations = [ + migrations.AlterField( + model_name="secret", + name="project", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="secrets", + to="projects.project", + ), + ), + migrations.AlterField( + model_name="secret", + name="slug", + field=autoslug.fields.AutoSlugField(editable=False, populate_from="name"), + ), + migrations.AlterField( + model_name="secret", + name="tags", + field=taggit.managers.TaggableManager( + blank=True, + help_text="A comma-separated list of tags.", + through="taggit.TaggedItem", + to="taggit.Tag", + verbose_name="Tags", + ), + ), + migrations.AddConstraint( + model_name="secret", + constraint=models.UniqueConstraint( + fields=("project", "slug"), name="Project and slug uniqueness" + ), + ), + ] diff --git a/src/core/api/app/credentials/migrations/0003_auto_20240710_1542.py b/src/core/api/app/credentials/migrations/0003_auto_20240710_1542.py new file mode 100644 index 00000000..341d9a5f --- /dev/null +++ b/src/core/api/app/credentials/migrations/0003_auto_20240710_1542.py @@ -0,0 +1,84 @@ +# Generated by Django 3.2.20 on 2024-07-10 15:42 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0105_auto_20240701_2118"), + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ("credentials", "0002_auto_20230303_1844"), + ] + + operations = [ + migrations.AddField( + model_name="secret", + name="description", + field=models.TextField(blank=True), + ), + migrations.AddField( + model_name="secret", + name="environment", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.CASCADE, + related_name="secrets", + to="projects.environment", + ), + ), + migrations.AddField( + model_name="secret", + name="services", + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name="secret", + name="sharing_scope", + field=models.CharField( + choices=[ + ("project", "Shared within a project"), + ("environment", "Shared within an environment"), + ], + default="project", + max_length=20, + ), + ), + migrations.AddField( + model_name="secret", + name="users", + field=models.BooleanField(default=False), + ), + migrations.AddField( 
+ model_name="secret", + name="value_format", + field=models.CharField( + choices=[ + ("plain_text", "Plain text"), + ("dict", "Key-value string pairs"), + ("json", "Raw JSON"), + ], + default="plain_text", + max_length=20, + ), + ), + migrations.AlterField( + model_name="secret", + name="created_by", + field=models.ForeignKey( + default=1, + on_delete=django.db.models.deletion.PROTECT, + related_name="created_secrets", + to="users.user", + ), + preserve_default=False, + ), + migrations.AlterField( + model_name="secret", + name="slug", + field=models.CharField(max_length=200), + ), + ] diff --git a/src/core/api/app/credentials/migrations/0004_secret_credentials_sharing_c78bb7_idx_and_more.py b/src/core/api/app/credentials/migrations/0004_secret_credentials_sharing_c78bb7_idx_and_more.py new file mode 100644 index 00000000..6f4f036b --- /dev/null +++ b/src/core/api/app/credentials/migrations/0004_secret_credentials_sharing_c78bb7_idx_and_more.py @@ -0,0 +1,37 @@ +# Generated by Django 5.0.7 on 2024-08-12 16:03 + +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("credentials", "0003_auto_20240710_1542"), + ("taggit", "0005_auto_20220424_2025"), + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ] + + operations = [ + migrations.AddIndex( + model_name="secret", + index=models.Index( + fields=["sharing_scope", "project_id", "slug"], + name="credentials_sharing_c78bb7_idx", + ), + ), + migrations.AddIndex( + model_name="secret", + index=models.Index( + fields=["sharing_scope", "project_id", "environment_id", "slug"], + name="credentials_sharing_88ede7_idx", + ), + ), + migrations.AddIndex( + model_name="secret", + index=models.Index( + fields=["sharing_scope", "project_id", "users", "services", "slug"], + name="credentials_sharing_c9a788_idx", + ), + ), + ] diff --git a/src/core/api/app/credentials/migrations/0005_secret_accessed_at_secret_backend.py b/src/core/api/app/credentials/migrations/0005_secret_accessed_at_secret_backend.py new file mode 100644 index 00000000..47c35a36 --- /dev/null +++ b/src/core/api/app/credentials/migrations/0005_secret_accessed_at_secret_backend.py @@ -0,0 +1,30 @@ +# Generated by Django 5.0.7 on 2024-09-02 15:50 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("credentials", "0004_secret_credentials_sharing_c78bb7_idx_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="secret", + name="accessed_at", + field=models.DateTimeField(blank=True, editable=False, null=True), + ), + migrations.AddField( + model_name="secret", + name="backend", + field=models.CharField( + choices=[ + ("datacoves", "Datacoves"), + ("aws_secrets_manager", "AWS Secrets Manager"), + ], + default="datacoves", + max_length=50, + ), + ), + ] diff --git a/src/core/api/app/credentials/migrations/0006_remove_secret_name.py b/src/core/api/app/credentials/migrations/0006_remove_secret_name.py new file mode 100644 index 00000000..12bcfa9c --- /dev/null +++ b/src/core/api/app/credentials/migrations/0006_remove_secret_name.py @@ -0,0 +1,17 @@ +# Generated by Django 5.0.7 on 2024-11-01 20:48 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('credentials', '0005_secret_accessed_at_secret_backend'), + ] + + operations = [ + migrations.RemoveField( + model_name='secret', + name='name', + ), + ] diff --git a/src/core/api/app/credentials/migrations/__init__.py 
b/src/core/api/app/credentials/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/credentials/models/__init__.py b/src/core/api/app/credentials/models/__init__.py new file mode 100644 index 00000000..57adacc5 --- /dev/null +++ b/src/core/api/app/credentials/models/__init__.py @@ -0,0 +1 @@ +from .secret import * # noqa: F401,F403 diff --git a/src/core/api/app/credentials/models/secret.py b/src/core/api/app/credentials/models/secret.py new file mode 100644 index 00000000..cbeed078 --- /dev/null +++ b/src/core/api/app/credentials/models/secret.py @@ -0,0 +1,155 @@ +import re + +from core.fields import EncryptedJSONField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.core.exceptions import ValidationError +from django.db import models +from django.utils.timezone import now +from taggit.managers import TaggableManager +from users.models import User + + +class Secret(AuditModelMixin, DatacovesModel): + """Project-Level Secret storage + + Stores an arbitrary JSON value in the 'value' field. Secrets are + associated with :model:`projects.Project` or :model:`projects.Environment` + (scope) and thus exist on a per-project level. + This model can also be used to store user-level secrets, when 'services' and + 'users' are set to False. + + ======= + Methods + ======= + + - **archive(archiver)** - Archives the secret, as done by user 'archiver' + """ + + SHARED_PROJECT = "project" + SHARED_ENVIRONMENT = "environment" + SHARING_SCOPES = ( + (SHARED_PROJECT, "Shared within a project"), + (SHARED_ENVIRONMENT, "Shared within an environment"), + ) + VALUE_FORMAT_PLAIN_TEXT = "plain_text" + VALUE_FORMAT_KEY_VALUE = "dict" + VALUE_FORMAT_JSON = "json" + VALUE_FORMATS = ( + (VALUE_FORMAT_PLAIN_TEXT, "Plain text"), + (VALUE_FORMAT_KEY_VALUE, "Key-value string pairs"), + (VALUE_FORMAT_JSON, "Raw JSON"), + ) + SECRETS_BACKEND_DATACOVES = "datacoves" + SECRETS_BACKEND_AWS = "aws_secrets_manager" + SECRETS_BACKENDS = ( + ( + SECRETS_BACKEND_DATACOVES, + "Datacoves", + ), + ( + SECRETS_BACKEND_AWS, + "AWS Secrets Manager", + ), + ) + + description = models.TextField(blank=True) + tags = TaggableManager(blank=True) + + slug = models.CharField(max_length=200) + backend = models.CharField( + max_length=50, choices=SECRETS_BACKENDS, default=SECRETS_BACKEND_DATACOVES + ) + value_format = models.CharField( + max_length=20, choices=VALUE_FORMATS, default=VALUE_FORMAT_PLAIN_TEXT + ) + value = EncryptedJSONField(default=dict) + + sharing_scope = models.CharField( + max_length=20, choices=SHARING_SCOPES, default=SHARED_PROJECT + ) + project = models.ForeignKey( + "projects.Project", + on_delete=models.CASCADE, + related_name="secrets", + ) + environment = models.ForeignKey( + "projects.Environment", + on_delete=models.CASCADE, + related_name="secrets", + null=True, + blank=True, + ) + services = models.BooleanField(default=False) + users = models.BooleanField(default=False) + + created_by = models.ForeignKey( + User, + on_delete=models.PROTECT, + related_name="created_secrets", + ) + accessed_at = models.DateTimeField(editable=False, blank=True, null=True) + archived_at = models.DateTimeField(editable=False, blank=True, null=True) + archived_by = models.ForeignKey( + User, + on_delete=models.SET_NULL, + related_name="archived_secrets", + editable=False, + blank=True, + null=True, + ) + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["project", "slug"], + name="Project and slug uniqueness", + ) + ] +
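# NOTE (illustrative): the composite indexes below are intended to cover the
+ # slug lookups made by the public secrets endpoint: project-wide,
+ # environment-scoped, and user/service scoped queries. A hypothetical query
+ # they would serve, using made-up values, is:
+ #   Secret.objects.filter(
+ #       sharing_scope=Secret.SHARED_ENVIRONMENT,
+ #       project_id=1, environment_id=2, slug="dev123|my-secret",
+ #   )
+ 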
indexes = [ + models.Index(fields=["sharing_scope", "project_id", "slug"]), + models.Index( + fields=["sharing_scope", "project_id", "environment_id", "slug"] + ), + models.Index( + fields=["sharing_scope", "project_id", "users", "services", "slug"] + ), + ] + + def __str__(self) -> str: + return self.slug + + @property + def created_by_name(self): + return self.created_by.name + + @property + def created_by_email(self): + return self.created_by.email + + @property + def is_system(self) -> bool: + """Whether the secret was created by a Service Account""" + return self.created_by.is_service_account + + def save(self, *args, **kwargs): + if self.sharing_scope == self.SHARED_ENVIRONMENT: + self.project = self.environment.project + # We prefix the slug with the environment slug to avoid uniqueness constraint violations + if self.services or self.users: + slug = re.sub(r".+\|", "", self.slug) + self.slug = f"{self.environment.slug}|{slug}" + else: + self.environment = None + if not self.services and not self.users: + slug = re.sub(r".+\|", "", self.slug) + self.slug = f"{self.created_by.slug}|{slug}" + super().save(*args, **kwargs) + + def archive(self, archiver): + if not self.archived_at: + self.archived_at = now() + self.archived_by = archiver + self.save() + else: + raise ValidationError("Secret was already archived") diff --git a/src/core/api/app/credentials/permissions.py b/src/core/api/app/credentials/permissions.py new file mode 100644 index 00000000..c9973c1c --- /dev/null +++ b/src/core/api/app/credentials/permissions.py @@ -0,0 +1,10 @@ +from clusters.request_utils import get_cluster +from rest_framework import permissions + + +class IsSecretsAdminEnabled(permissions.BasePermission): + message = "Secrets admin feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + return features["admin_secrets"] diff --git a/src/core/api/app/credentials/serializers.py b/src/core/api/app/credentials/serializers.py new file mode 100644 index 00000000..3ea616a6 --- /dev/null +++ b/src/core/api/app/credentials/serializers.py @@ -0,0 +1,125 @@ +import re + +from core.serializers import EncodedValueField +from django.http import Http404 +from rest_framework import serializers +from taggit.serializers import TaggitSerializer, TagListSerializerField + +from .backends import SecretAlreadyExistsException, SecretNotFoundException +from .backends.all import BACKENDS +from .models import Secret + + +class PublicSecretSerializer(TaggitSerializer, serializers.ModelSerializer): + class Meta: + model = Secret + fields = ("slug", "value") + + def to_representation(self, instance): + rep = super().to_representation(instance) + # Get value from backend if exists + backend = BACKENDS.get(instance.project.secrets_backend) + if backend: + try: + rep["value"] = backend(instance.project).get(instance) + except SecretNotFoundException: + raise Http404 + # Convert plain_text secret types + if "PLAIN_TEXT_VALUE" in rep["value"]: + rep["value"] = rep["value"]["PLAIN_TEXT_VALUE"] + rep["slug"] = re.sub(r".+\|", "", rep["slug"]) + return rep + + +class SecretSerializer(TaggitSerializer, serializers.ModelSerializer): + tags = TagListSerializerField() + value = EncodedValueField() + + class Meta: + model = Secret + fields = ( + "id", + "slug", + "tags", + "description", + "value_format", + "value", + "sharing_scope", + "project", + "environment", + "users", + "services", + "created_by_name", + "created_by_email", + "accessed_at", + "backend", + "is_system", + ) + + def
create(self, validated_data): + validated_data["created_by"] = self.context.get("request").user + backend = BACKENDS.get(validated_data["project"].secrets_backend) + validated_data["backend"] = validated_data["project"].secrets_backend + + if backend: + value = validated_data["value"] + validated_data["value"] = {} + + else: + if not validated_data["slug"].startswith("datacoves-"): + validated_data["slug"] = "datacoves-" + validated_data["slug"] + + secret = super().create(validated_data) + + if backend: + try: + backend(secret.project).create(secret, value) + except SecretAlreadyExistsException: + backend(secret.project).update(secret, value) + return secret + + def update(self, instance, validated_data): + backend = BACKENDS.get(instance.project.secrets_backend) + create = False + if backend: + try: + current_value = backend(instance.project).get(instance) + except SecretNotFoundException: + # if switching from datacoves backend. + if instance.backend == "datacoves": + create = True + current_value = instance.value + else: + if not validated_data["slug"].startswith("datacoves-"): + validated_data["slug"] = "datacoves-" + validated_data["slug"] + + current_value = instance.value + + validated_data["value"] = EncodedValueField.decode_values( + current_value, validated_data["value"].copy(), {} + ) + validated_data["backend"] = instance.project.secrets_backend + if backend: + value = validated_data["value"] + validated_data["value"] = {} + secret = super().update(instance, validated_data) + if backend: + if create: + backend(instance.project).create(secret, value) + else: + backend(instance.project).update(secret, value) + return secret + + def to_representation(self, instance): + rep = super().to_representation(instance) + backend = BACKENDS.get(instance.backend) + if backend: + try: + rep["value"] = self.fields["value"].to_representation( + backend(instance.project).get(instance) + ) + except SecretNotFoundException: + rep["value"] = "" + rep["secrets_backend_error"] = "not_found" + rep["slug"] = re.sub(r".+\|", "", rep["slug"]) + return rep diff --git a/src/core/api/app/credentials/views.py b/src/core/api/app/credentials/views.py new file mode 100644 index 00000000..1032e742 --- /dev/null +++ b/src/core/api/app/credentials/views.py @@ -0,0 +1,161 @@ +from core.mixins.views import ( + AddAccountToContextMixin, + VerboseCreateModelMixin, + VerboseUpdateModelMixin, +) +from django.conf import settings +from django.core.exceptions import ValidationError +from django.db.models import Q +from django.utils import timezone +from django_filters.rest_framework import DjangoFilterBackend +from iam.permissions import HasResourcePermission +from projects.models import Environment +from rest_framework import filters, generics, status +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response + +from .backends import SecretNotFoundException +from .backends.all import BACKENDS +from .models import Secret +from .permissions import IsSecretsAdminEnabled +from .serializers import ( # RetrieveSecretSerializer, + PublicSecretSerializer, + SecretSerializer, +) + + +class PublicSecretList( + generics.ListCreateAPIView, +): + """ + List all secrets by account, or creates a new secret. 
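+ 
+ The environment slug in the URL scopes both the permission check and the
+ returned queryset; see ``get_queryset`` below for the exact filters applied.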
+ """ + + throttle_classes = [] # TODO: Set appropriate limits + serializer_class = PublicSecretSerializer + permission_classes = [IsAuthenticated] + + def get_queryset(self): + """ + Optionally restricts the returned secrets to a given slug, key and tags, + by filtering against query parameters in the URL. + + This endpoint receives a mandatory environment slug since it's used to + determine permissions. + """ + + key_filter = "services" if self.request.user.is_service_account else "users" + extra_filters = {key_filter: True} + + env_slug = self.kwargs["slug"] + env_id, project_slug, project_id = Environment.objects.values_list( + "id", "project__slug", "project__id" + ).filter(slug=env_slug)[0] + user_projects, user_envs = self.request.user.project_and_env_slugs() + + is_secrets_admin = ( + self.request.user.permissions.filter( + name__icontains=f"|{settings.ADMIN_SECRETS_RESOURCE}" + ).count() + > 0 + ) + + if is_secrets_admin: + filters = Q() + else: + # Start by getting all secrets created by the current user + filters = Q(created_by=self.request.user) + # if user has access to at least one resource in the environment or project + if env_slug in user_envs or project_slug in user_projects: + filters |= Q( + sharing_scope=Secret.SHARED_ENVIRONMENT, + environment=env_id, + **extra_filters, + ) + filters |= Q( + sharing_scope=Secret.SHARED_PROJECT, + **extra_filters, + ) + + queryset = ( + Secret.objects.filter(project=project_id).filter(filters).order_by("slug") + ) + + slug = self.request.query_params.get("slug") + if slug: + queryset = queryset.filter( + Q(sharing_scope=Secret.SHARED_PROJECT, slug=slug.lower()) + | Q( + sharing_scope=Secret.SHARED_ENVIRONMENT, + slug=f"{env_slug}|{slug.lower()}", + ) + | Q( + users=False, + services=False, + slug=f"{self.request.user.slug}|{slug.lower()}", + ) + ) + else: + tags = self.request.query_params.getlist("tags") + if tags: + queryset = queryset.filter(tags__name__in=tags).distinct() + + queryset.update(accessed_at=timezone.now()) + return queryset + + +class SecretMixin(AddAccountToContextMixin): + throttle_classes = [] # TODO: Set appropriate limits + serializer_class = SecretSerializer + permission_classes = [ + IsAuthenticated, + HasResourcePermission, + IsSecretsAdminEnabled, + ] + + def get_queryset(self): + return Secret.objects.filter( + project__account__slug=self.kwargs.get("account_slug") + ).order_by("slug") + + +class SecretList(SecretMixin, VerboseCreateModelMixin, generics.ListCreateAPIView): + throttle_classes = [] # TODO: Set appropriate limits + filter_backends = [filters.SearchFilter, DjangoFilterBackend] + serializer_class = SecretSerializer + search_fields = ["slug"] + + +class SecretDetail( + SecretMixin, + VerboseUpdateModelMixin, + generics.RetrieveUpdateDestroyAPIView, +): + throttle_classes = [] # TODO: Set appropriate limits + + def perform_destroy(self, instance): + backend = BACKENDS.get(instance.project.secrets_backend) + if backend: + try: + backend(instance.project).delete(instance) + return Response(status=status.HTTP_204_NO_CONTENT) + except SecretNotFoundException: + pass + elif instance.is_system: + return Response( + "Secrets created by system can not be modified.", + status=status.HTTP_400_BAD_REQUEST, + ) + else: + try: + super().perform_destroy(instance) + return Response(status=status.HTTP_204_NO_CONTENT) + except ValidationError as ex: + return Response( + ex.message, + status=status.HTTP_400_BAD_REQUEST, + ) + + def destroy(self, request, *args, **kwargs): + instance = self.get_object() + return 
self.perform_destroy(instance) diff --git a/src/core/api/app/datacoves/__init__.py b/src/core/api/app/datacoves/__init__.py new file mode 100644 index 00000000..5568b6d7 --- /dev/null +++ b/src/core/api/app/datacoves/__init__.py @@ -0,0 +1,5 @@ +# This will make sure the app is always imported when +# Django starts so that shared_task will use this app. +from .celery import app as celery_app + +__all__ = ("celery_app",) diff --git a/src/core/api/app/datacoves/admin.py b/src/core/api/app/datacoves/admin.py new file mode 100644 index 00000000..a217dc7d --- /dev/null +++ b/src/core/api/app/datacoves/admin.py @@ -0,0 +1,236 @@ +import datetime + +from django.contrib.admin import BooleanFieldListFilter, DateFieldListFilter +from django.db import models +from django.utils import timezone +from django.utils.translation import gettext_lazy as _ + + +class BaseModelAdmin: + change_list_template = "admin/change_list_filter_sidebar.html" + + +class DefaultNoBooleanFilter(BooleanFieldListFilter): + def choices(self, changelist): + field_choices = dict(self.field.flatchoices) + # Switching All by No and vice-versa + for lookup, title in ( + (None, _("No")), + ("1", field_choices.get(True, _("Yes"))), + ("0", field_choices.get(False, _("All"))), + ): + yield { + "selected": self.lookup_val == lookup and not self.lookup_val2, + "query_string": changelist.get_query_string( + {self.lookup_kwarg: lookup}, [self.lookup_kwarg2] + ), + "display": title, + } + if self.field.null: + yield { + "selected": self.lookup_val2 == "True", + "query_string": changelist.get_query_string( + {self.lookup_kwarg2: "True"}, [self.lookup_kwarg] + ), + "display": field_choices.get(None, _("Unknown")), + } + + def queryset(self, request, queryset): + # Switching All by No and vice-versa + if ( + self.lookup_kwarg in self.used_parameters + and isinstance(self.used_parameters[self.lookup_kwarg], list) + and len(self.used_parameters[self.lookup_kwarg]) + and not int(self.used_parameters[self.lookup_kwarg][0]) + ): + # This handles the 'all' case + del self.used_parameters[self.lookup_kwarg] + elif ( + not self.used_parameters + or self.used_parameters.get(self.lookup_kwarg) is None + ): + self.used_parameters[self.lookup_kwarg] = [0] + + return super().queryset(request, queryset) + + +class DateFieldListFilterExtended(DateFieldListFilter): + def __init__(self, field, request, params, model, model_admin, field_path): + self.field_generic = "%s__" % field_path + self.date_params = { + k: v[-1] for k, v in params.items() if k.startswith(self.field_generic) + } + + now = timezone.now() + # When time zone support is enabled, convert "now" to the user's time + # zone so Django's definition of "Today" matches what the user expects. 
+ if timezone.is_aware(now): + now = timezone.localtime(now) + + if isinstance(field, models.DateTimeField): + today = now.replace(hour=0, minute=0, second=0, microsecond=0) + else: # field is a models.DateField + today = now.date() + tomorrow = today + datetime.timedelta(days=1) + if today.month == 12: + next_month = today.replace(year=today.year + 1, month=1, day=1) + else: + next_month = today.replace(month=today.month + 1, day=1) + if today.month == 1: + last_month = today.replace(year=today.year - 1, month=12, day=1) + else: + last_month = today.replace(month=today.month - 1, day=1) + next_year = today.replace(year=today.year + 1, month=1, day=1) + + self.lookup_kwarg_since = "%s__gte" % field_path + self.lookup_kwarg_until = "%s__lt" % field_path + self.links = ( + (_("Any date"), {}), + ( + _("Today"), + { + self.lookup_kwarg_since: today, + self.lookup_kwarg_until: tomorrow, + }, + ), + ( + _("Past 7 days"), + { + self.lookup_kwarg_since: today - datetime.timedelta(days=7), + self.lookup_kwarg_until: tomorrow, + }, + ), + ( + _("This month"), + { + self.lookup_kwarg_since: today.replace(day=1), + self.lookup_kwarg_until: next_month, + }, + ), + ( + _("Last month"), + { + self.lookup_kwarg_since: last_month, + self.lookup_kwarg_until: today.replace(day=1), + }, + ), + ( + _("Past 30 days"), + { + self.lookup_kwarg_since: today - datetime.timedelta(days=30), + self.lookup_kwarg_until: tomorrow, + }, + ), + ( + _("Past 180 days"), + { + self.lookup_kwarg_since: today - datetime.timedelta(days=180), + self.lookup_kwarg_until: tomorrow, + }, + ), + ( + _("This year"), + { + self.lookup_kwarg_since: today.replace(month=1, day=1), + self.lookup_kwarg_until: next_year, + }, + ), + ( + _("Last year"), + { + self.lookup_kwarg_since: today.replace( + year=today.year - 1, month=1, day=1 + ), + self.lookup_kwarg_until: today.replace(month=1, day=1), + }, + ), + ) + if field.null: + self.lookup_kwarg_isnull = "%s__isnull" % field_path + self.links += ( + (_("No date"), {self.field_generic + "isnull": True}), + (_("Has date"), {self.field_generic + "isnull": False}), + ) + super(DateFieldListFilter, self).__init__( + field, request, params, model, model_admin, field_path + ) + + +class DeactivatedDateFilter(DateFieldListFilter): + def __init__(self, field, request, params, model, model_admin, field_path): + self.field_generic = "%s__" % field_path + self.date_params = { + k: v[-1] for k, v in params.items() if k.startswith(self.field_generic) + } + + now = timezone.now() + # When time zone support is enabled, convert "now" to the user's time + # zone so Django's definition of "Today" matches what the user expects. 
+ if timezone.is_aware(now): + now = timezone.localtime(now) + + if isinstance(field, models.DateTimeField): + today = now.replace(hour=0, minute=0, second=0, microsecond=0) + else: # field is a models.DateField + today = now.date() + tomorrow = today + datetime.timedelta(days=1) + if today.month == 12: + next_month = today.replace(year=today.year + 1, month=1, day=1) + else: + next_month = today.replace(month=today.month + 1, day=1) + next_year = today.replace(year=today.year + 1, month=1, day=1) + + self.lookup_kwarg_since = "%s__gte" % field_path + self.lookup_kwarg_until = "%s__lt" % field_path + self.lookup_kwarg_isnull = "%s__isnull" % field_path + self.links = ( + # this used to be the `Any date` value + (_("Active"), {}), + (_("Inactive"), {self.lookup_kwarg_isnull: False}), + # this used to be the `No date` value + (_("All"), {self.lookup_kwarg_isnull: True}), + ( + _("Today"), + { + self.lookup_kwarg_since: today, + self.lookup_kwarg_until: tomorrow, + }, + ), + ( + _("Past 7 days"), + { + self.lookup_kwarg_since: today - datetime.timedelta(days=7), + self.lookup_kwarg_until: tomorrow, + }, + ), + ( + _("This month"), + { + self.lookup_kwarg_since: today.replace(day=1), + self.lookup_kwarg_until: next_month, + }, + ), + ( + _("This year"), + { + self.lookup_kwarg_since: today.replace(month=1, day=1), + self.lookup_kwarg_until: next_year, + }, + ), + ) + super(DateFieldListFilter, self).__init__( + field, request, params, model, model_admin, field_path + ) + + def queryset(self, request, queryset): + if ( + self.used_parameters.get(self.lookup_kwarg_isnull) + and self.used_parameters.get(self.lookup_kwarg_isnull)[0] + ): + del self.used_parameters[self.lookup_kwarg_isnull] + elif ( + not self.used_parameters + or self.used_parameters.get(self.lookup_kwarg_isnull) is None + ): + self.used_parameters[self.lookup_kwarg_isnull] = [True] + return super().queryset(request, queryset) diff --git a/src/core/api/app/datacoves/asgi.py b/src/core/api/app/datacoves/asgi.py new file mode 100644 index 00000000..b43123dd --- /dev/null +++ b/src/core/api/app/datacoves/asgi.py @@ -0,0 +1,30 @@ +""" +ASGI config for datacoves project. + +It exposes the ASGI callable as a module-level variable named ``application``. + +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ +""" + +import os + +from channels.auth import AuthMiddlewareStack +from channels.routing import ProtocolTypeRouter, URLRouter +from channels.security.websocket import AllowedHostsOriginValidator +from django.core.asgi import get_asgi_application + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "datacoves.settings") + +application = get_asgi_application() + +from .routing import websocket_urlpatterns # noqa + +application = ProtocolTypeRouter( + { + "http": application, + "websocket": AllowedHostsOriginValidator( + AuthMiddlewareStack(URLRouter(websocket_urlpatterns)) + ), + } +) diff --git a/src/core/api/app/datacoves/celery.py b/src/core/api/app/datacoves/celery.py new file mode 100644 index 00000000..6cb5b8ba --- /dev/null +++ b/src/core/api/app/datacoves/celery.py @@ -0,0 +1,24 @@ +import logging +import os + +from celery import Celery + +# Set the default Django settings module for the 'celery' program. +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "datacoves.settings") + +logger = logging.getLogger(__name__) +app = Celery("datacoves") + +# Using a string here means the worker doesn't have to serialize +# the configuration object to child processes. 
+# - namespace='CELERY' means all celery-related configuration keys +# should have a `CELERY_` prefix. +app.config_from_object("django.conf:settings", namespace="CELERY") + +# Load task modules from all registered Django apps. +app.autodiscover_tasks() + + +@app.task(bind=True) +def debug_task(self): + logger.info("Request %s", self.request) diff --git a/src/core/api/app/datacoves/csrf_exempt_auth_class.py b/src/core/api/app/datacoves/csrf_exempt_auth_class.py new file mode 100644 index 00000000..d813a755 --- /dev/null +++ b/src/core/api/app/datacoves/csrf_exempt_auth_class.py @@ -0,0 +1,6 @@ +from rest_framework.authentication import SessionAuthentication + + +class CsrfExemptSessionAuthentication(SessionAuthentication): + def enforce_csrf(self, request): + return # diff --git a/src/core/api/app/datacoves/integration_tests_settings.py b/src/core/api/app/datacoves/integration_tests_settings.py new file mode 100644 index 00000000..8f7f6f71 --- /dev/null +++ b/src/core/api/app/datacoves/integration_tests_settings.py @@ -0,0 +1,20 @@ +from datacoves.settings import * # noqa +from datacoves.settings import LOGGING + +CELERY_ALWAYS_EAGER = True +RUN_TASKS_SYNCHRONOUSLY = True +CELERY_TASK_ALWAYS_EAGER = True +CORS_ALLOW_ALL_ORIGINS = True + +# Uncomment to debug issues +# LOGGING["root"]["level"] = "DEBUG" + +integration_test_loggers = { + "projects.git": { + "handlers": ["console"], + "level": "DEBUG", + "propagate": False, + }, +} + +LOGGING["loggers"].update(integration_test_loggers) diff --git a/src/core/api/app/datacoves/routing.py b/src/core/api/app/datacoves/routing.py new file mode 100644 index 00000000..0a8f0002 --- /dev/null +++ b/src/core/api/app/datacoves/routing.py @@ -0,0 +1,8 @@ +from django.urls import re_path +from projects import consumers + +websocket_urlpatterns = [ + re_path( + r"ws/account/(?P[\w\d-]+)/$", consumers.AccountConsumer.as_asgi() + ), +] diff --git a/src/core/api/app/datacoves/settings.py b/src/core/api/app/datacoves/settings.py new file mode 100644 index 00000000..09901a17 --- /dev/null +++ b/src/core/api/app/datacoves/settings.py @@ -0,0 +1,816 @@ +import base64 +import json +import re +import socket +import sys +from datetime import datetime, timedelta +from os import environ +from pathlib import Path + +import sentry_sdk +from celery.schedules import crontab +from corsheaders import defaults +from sentry_sdk.integrations.django import DjangoIntegration + +# Utils + +BASE_DIR = Path(__file__).resolve().parent.parent # Usage: BASE_DIR / 'subdir' + + +def to_bool(s): + return s.lower() in ("yes", "y", "true", "t", "1") + + +def env_bool(key, default=False): + return to_bool(environ[key]) if key in environ else default + + +# Global, general settings + +SECRET_KEY = environ.get("SECRET_KEY") + +FERNET_KEY = environ.get("FERNET_KEY") + +DEBUG = env_bool("DEBUG") + +WSGI_APPLICATION = "datacoves.wsgi.application" +ASGI_APPLICATION = "datacoves.asgi.application" + +REDIS_URL = environ.get("REDIS_URI") + +CHANNEL_LAYERS = { + "default": { + "BACKEND": "channels_redis.core.RedisChannelLayer", + "CONFIG": { + "hosts": [REDIS_URL], + }, + }, +} + +ROOT_URLCONF = "datacoves.urls" + +ALLOWED_HOSTS = [ + socket.gethostbyname(socket.gethostname()), # Prometheus monitoring + "core-api-svc.core.svc", + "core-api-svc.core", + "core-api-svc", + "api.datacoveslocal.com", + ".datacoveslocal.com", +] + environ.get("ALLOWED_HOSTS", "").split(",") + +CORS_ORIGIN_ALLOW_ALL = False + +BASE_DOMAIN = environ.get("BASE_DOMAIN") + +CORS_ALLOWED_ORIGIN_REGEXES = ( + [ + 
rf".*{re.escape(BASE_DOMAIN)}", + ] + if BASE_DOMAIN + else [] +) +# New header required by sentry to trace requests +CORS_ALLOW_HEADERS = defaults.default_headers + ("sentry-trace",) + +CORS_ALLOW_CREDENTIALS = True + +# INSTALLED_APPS reads from the bottom. +# Add new apps at the top of a section, not at the bottom. +INSTALLED_APPS = [ + "daphne", + # datacoves apps + "core.apps.CoreConfig", + "clusters.apps.ClustersConfig", + "projects.apps.ProjectsConfig", + "integrations.apps.IntegrationsConfig", + "invitations.apps.InvitationsConfig", + "iam.apps.IAMConfig", + "users.apps.UsersConfig", + "codegen.apps.CodegenConfig", + "billing.apps.BillingConfig", + "notifications.apps.NotificationsConfig", + "credentials.apps.CredentialsConfig", + # libraries + "corsheaders", + "django_extensions", + "django_object_actions", + "social_django", + "rest_framework", + "rest_framework.authtoken", + "rest_framework_simplejwt", + "django_filters", + "django_json_widget", + "django_celery_results", + "django_celery_beat", + "django_prometheus", + "taggit", + "grappelli", + "csvexport", + # health check apps + "health_check", + "health_check.db", + "health_check.cache", + "health_check.storage", + "health_check.contrib.migrations", + "health_check.contrib.celery", + "health_check.contrib.celery_ping", + "health_check.contrib.psutil", + "health_check.contrib.redis", + # django.contrib + "django.contrib.admin", + "django.contrib.auth", + "django.contrib.contenttypes", + "django.contrib.sessions", + "django.contrib.messages", + "django.contrib.staticfiles", + "oauth2_provider", + "django.contrib.admindocs", + "knox", +] + +MIDDLEWARE = [ + "django_prometheus.middleware.PrometheusBeforeMiddleware", + "clusters.metrics.DatacovesPrometheusMetricMiddleware", + "django.middleware.security.SecurityMiddleware", + "django.contrib.sessions.middleware.SessionMiddleware", + "django.middleware.common.CommonMiddleware", + "django.middleware.csrf.CsrfViewMiddleware", + "corsheaders.middleware.CorsMiddleware", + "core.middleware.AuthErrorHandlerMiddleware", + "django.contrib.auth.middleware.AuthenticationMiddleware", + "django.contrib.messages.middleware.MessageMiddleware", + "django.middleware.clickjacking.XFrameOptionsMiddleware", + "oauth2_provider.middleware.OAuth2TokenMiddleware", + "django.contrib.admindocs.middleware.XViewMiddleware", + "django_prometheus.middleware.PrometheusAfterMiddleware", +] + +DATABASES = { + "default": { + "ENGINE": "django_prometheus.db.backends.postgresql", + "NAME": environ.get("DB_NAME"), + "USER": environ.get("DB_USER"), + "PASSWORD": environ.get("DB_PASS"), + "HOST": environ.get("DB_HOST", "db"), + "PORT": environ.get("DB_PORT", "5432"), + }, +} + +CACHES = { + "default": { + "BACKEND": "django_prometheus.cache.backends.redis.RedisCache", + "LOCATION": REDIS_URL, + } +} + +TESTING = "test" in sys.argv + +DEBUG_TOOLBAR_CONFIG = { + "SHOW_COLLAPSED": True, + "SHOW_TOOLBAR_CALLBACK": "clusters.debug_toolbar.show_toolbar", +} + +if not TESTING: + INSTALLED_APPS = [ + *INSTALLED_APPS, + "debug_toolbar", + ] + MIDDLEWARE = [ + "debug_toolbar.middleware.DebugToolbarMiddleware", + *MIDDLEWARE, + ] + +if TESTING: + CACHES["default"] = { + "BACKEND": "django.core.cache.backends.dummy.DummyCache", + } + +DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField" + +TEMPLATES = [ + { + "BACKEND": "django.template.backends.django.DjangoTemplates", + "DIRS": [BASE_DIR / "core" / "templates"], + "APP_DIRS": True, + "OPTIONS": { + "context_processors": [ + "django.template.context_processors.debug", + 
"django.template.context_processors.request", + "django.contrib.auth.context_processors.auth", + "django.contrib.messages.context_processors.messages", + ] + }, + }, +] + +LANGUAGE_CODE = "en-us" + +TIME_ZONE = "UTC" + +USE_I18N = True + +USE_L10N = True + +USE_TZ = True + +STATIC_URL = "static/" +STATIC_ROOT = BASE_DIR / "assets" + +# What bucket will we use for S3 assets? Not used by development (yet) +STATIC_S3_BUCKET = environ.get( + "STATIC_S3_BUCKET", "datacoves-us-east-1-core-api-assets" +) + +# For production, we need to have the static URL based on the Cluster database +# model. To avoid a circular dependency, we'll use a custom storage class. +if not DEBUG: + STORAGES = { + "staticfiles": { + "BACKEND": "datacoves.storage.S3", + } + } + +USE_X_FORWARDED_HOST = True +SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https") + +# Auth settings + +LOGIN_URL = "/iam/login" +LOGIN_ERROR_URL = "/iam/login-error" +LOGIN_REDIRECT_URL = f"https://{BASE_DOMAIN}" if BASE_DOMAIN else None +LOGOUT_REDIRECT = environ.get("LOGOUT_REDIRECT", f"https://{BASE_DOMAIN}/sign-in") +LOGOUT_REDIRECT_URL = "/iam/logout" # Admin logout url + +AUTH_USER_MODEL = "users.User" + +AUTH_PASSWORD_VALIDATORS = [ + { + "NAME": "django.contrib.auth.password_validation" + ".UserAttributeSimilarityValidator" + }, + {"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"}, + {"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"}, + {"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"}, +] + +REST_FRAMEWORK = { + "DEFAULT_AUTHENTICATION_CLASSES": [ + "datacoves.csrf_exempt_auth_class.CsrfExemptSessionAuthentication", + "rest_framework.authentication.BasicAuthentication", + "rest_framework.authentication.TokenAuthentication", + "rest_framework_simplejwt.authentication.JWTAuthentication", + # This will authorize tokens on all our classes by default. + # I don't think we want that -- I think we want it to work in + # very specific spots. 
Leaving this here so it is easy to + # enable if we choose to do so (and so the reasoning why is + # also known) + # "knox.auth.TokenAuthentication", + ], + "DEFAULT_FILTER_BACKENDS": ["django_filters.rest_framework.DjangoFilterBackend"], + "DEFAULT_PAGINATION_CLASS": "rest_framework.pagination.LimitOffsetPagination", + "DEFAULT_THROTTLE_CLASSES": [ + "rest_framework.throttling.AnonRateThrottle", + "rest_framework.throttling.UserRateThrottle", + ], + "DEFAULT_THROTTLE_RATES": {"anon": "1000/hour", "user": "2000/hour"}, +} + +SIMPLE_JWT = { + "TOKEN_OBTAIN_SERIALIZER": "iam.serializers.MyTokenObtainPairSerializer", + "ACCESS_TOKEN_LIFETIME": timedelta(minutes=60), + "REFRESH_TOKEN_LIFETIME": timedelta(days=30), +} + +KNOX_TOKEN_MODEL = "iam.DatacovesToken" + +REST_KNOX = { + "TOKEN_TTL": None, # don't expire tokens + "TOKEN_MODEL": "iam.DatacovesToken", +} + +# FIXME: Remove this setting to avoid security vulnerabilities, and configure +# SOCIAL_AUTH_ALLOWED_REDIRECT_HOSTS properly +SOCIAL_AUTH_SANITIZE_REDIRECTS = False + +SOCIAL_AUTH_REDIRECT_IS_HTTPS = True +SOCIAL_AUTH_TRAILING_SLASH = False # Remove trailing slash from routes +SOCIAL_AUTH_JSONFIELD_ENABLED = True +SOCIAL_AUTH_PIPELINE = ( + "social_core.pipeline.social_auth.social_details", + "social_core.pipeline.social_auth.social_uid", + "social_core.pipeline.social_auth.auth_allowed", + "iam.auth_pipeline.load_user", + "iam.auth_pipeline.add_to_group_by_role", + "social_core.pipeline.social_auth.social_user", + "social_core.pipeline.social_auth.associate_user", + "social_core.pipeline.social_auth.load_extra_data", + "social_core.pipeline.user.user_details", +) + +IDENTITY_PROVIDER = environ.get("IDENTITY_PROVIDER") + +# OIDC claims need to be prefixed with a namespace +# https://auth0.com/docs/secure/tokens/json-web-tokens/create-namespaced-custom-claims +IDP_SCOPE_NAMESPACE = "https://datacoves.com/" +IDP_SCOPE_CLAIMS = ["openid", "profile", "email"] +IDP_GROUPS_CLAIM = environ.get("IDP_GROUPS_CLAIM") +# User model fields that can't be modified +PROTECTED_USER_FIELDS = ["first_name", "last_name", "iam_groups"] + +# Some IDPs don't allow the groups claim, although they send them in the token (Azure AD) +IDP_SEND_GROUPS_CLAIM = env_bool("IDP_SEND_GROUPS_CLAIM") +if IDP_SEND_GROUPS_CLAIM and IDP_GROUPS_CLAIM: + IDP_SCOPE_CLAIMS.append(IDP_GROUPS_CLAIM) + +IDP_OIDC_USER_ID = "email" + +if IDENTITY_PROVIDER == "auth0": + AUTHENTICATION_BACKENDS = ["iam.backends.auth0.Auth0OAuth2"] + SOCIAL_AUTH_AUTH0_DOMAIN = environ["AUTH0_DOMAIN"] + SOCIAL_AUTH_AUTH0_KEY = environ["AUTH0_CLIENT_ID"] + SOCIAL_AUTH_AUTH0_SECRET = environ["AUTH0_CLIENT_SECRET"] + SOCIAL_AUTH_AUTH0_SCOPE = IDP_SCOPE_CLAIMS + SOCIAL_AUTH_AUTH0_AUTH_EXTRA_ARGUMENTS = {"prompt": "select_account"} + IDP_URL = f"https://{SOCIAL_AUTH_AUTH0_DOMAIN}" + IDP_CLIENT_ID = SOCIAL_AUTH_AUTH0_KEY + IDP_CLIENT_SECRET = SOCIAL_AUTH_AUTH0_SECRET + +elif IDENTITY_PROVIDER == "ping_one": + AUTHENTICATION_BACKENDS = ["iam.backends.ping.PingOneOpenIdConnect"] + SOCIAL_AUTH_PING_URL = environ["PING_URL"] + SOCIAL_AUTH_PING_ONE_KEY = environ["PING_CLIENT_ID"] + SOCIAL_AUTH_PING_ONE_SECRET = environ["PING_CLIENT_SECRET"] + SOCIAL_AUTH_PING_ONE_SCOPE = IDP_SCOPE_CLAIMS + IDP_URL = SOCIAL_AUTH_PING_URL + IDP_CLIENT_ID = SOCIAL_AUTH_PING_ONE_KEY + IDP_CLIENT_SECRET = SOCIAL_AUTH_PING_ONE_SECRET + +elif IDENTITY_PROVIDER == "ping_federate": + AUTHENTICATION_BACKENDS = ["iam.backends.ping.PingFederateOpenIdConnect"] + SOCIAL_AUTH_PING_URL = environ["PING_URL"] + SOCIAL_AUTH_PING_FEDERATE_KEY = 
environ["PING_CLIENT_ID"] + SOCIAL_AUTH_PING_FEDERATE_SECRET = environ["PING_CLIENT_SECRET"] + SOCIAL_AUTH_PING_FEDERATE_SCOPE = IDP_SCOPE_CLAIMS + IDP_URL = SOCIAL_AUTH_PING_URL + IDP_CLIENT_ID = SOCIAL_AUTH_PING_FEDERATE_KEY + IDP_CLIENT_SECRET = SOCIAL_AUTH_PING_FEDERATE_SECRET + +elif IDENTITY_PROVIDER == "azuread-tenant-oauth2": + IDP_OIDC_USER_ID = "preferred_username" + AUTHENTICATION_BACKENDS = ["iam.backends.azuread.AzureADTenantOAuth2"] + SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_RESOURCE = "https://graph.microsoft.com/" + SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY = environ["AZUREAD_CLIENT_ID"] + SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET = environ["AZUREAD_CLIENT_SECRET"] + SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID = environ["AZUREAD_TENANT_ID"] + SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SCOPE = IDP_SCOPE_CLAIMS + SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_AUTH_EXTRA_ARGUMENTS = { + "prompt": "select_account" + } + IDP_URL = f"https://login.microsoftonline.com/{SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID}/v2.0" + IDP_CLIENT_ID = SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_KEY + IDP_CLIENT_SECRET = SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_SECRET +else: + AUTHENTICATION_BACKENDS = ["django.contrib.auth.backends.ModelBackend"] + +# If explicitly enabling user and password authentication in addition to oauth +if IDENTITY_PROVIDER and env_bool("USER_AND_PASS_AUTH"): + AUTHENTICATION_BACKENDS.append("django.contrib.auth.backends.ModelBackend") + +SERVICE_AIRBYTE = "airbyte" +SERVICE_AIRFLOW = "airflow" +SERVICE_LOCAL_DBT_DOCS = "local-dbt-docs" +SERVICE_DBT_DOCS = "dbt-docs" +SERVICE_SUPERSET = "superset" +SERVICE_CODE_SERVER = "code-server" +SERVICE_LOCAL_AIRFLOW = "local-airflow" +SERVICE_DATAHUB = "datahub" + +SERVICES = [ + SERVICE_AIRBYTE, + SERVICE_AIRFLOW, + SERVICE_CODE_SERVER, + SERVICE_LOCAL_DBT_DOCS, + SERVICE_DBT_DOCS, + SERVICE_SUPERSET, + SERVICE_DATAHUB, +] + +# Billing +INSTANCE_SERVICES = [ + SERVICE_AIRBYTE, + SERVICE_AIRFLOW, + SERVICE_SUPERSET, + SERVICE_DATAHUB, +] + +INTERNAL_SERVICE_MINIO = "minio" +INTERNAL_SERVICE_POMERIUM = "pomerium" +INTERNAL_SERVICE_ELASTIC = "elastic" +INTERNAL_SERVICE_NEO4J = "neo4j" +INTERNAL_SERVICE_POSTGRESQL = "postgresql" +INTERNAL_SERVICE_KAFKA = "kafka" +INTERNAL_SERVICE_GRAFANA = "grafana" + +INTERNAL_SERVICES = [ + INTERNAL_SERVICE_MINIO, + INTERNAL_SERVICE_POMERIUM, + INTERNAL_SERVICE_ELASTIC, + INTERNAL_SERVICE_NEO4J, + INTERNAL_SERVICE_POSTGRESQL, + INTERNAL_SERVICE_KAFKA, + INTERNAL_SERVICE_GRAFANA, +] + +USER_SERVICES = [SERVICE_CODE_SERVER, SERVICE_LOCAL_DBT_DOCS, SERVICE_LOCAL_AIRFLOW] + +CLUSTER_SERVICES = [INTERNAL_SERVICE_GRAFANA] + +# These are the default resources on workbench +WORKBENCH_RESOURCES = [f"workbench:{service}" for service in SERVICES] + +# These are the default resources on dbt-api +# They are created when an Environment is created. 
+DBT_API_RESOURCES = ["{cluster_domain}:{env_slug}|dbt-api:manifest|write"] + +DBT_API_URL = "http://core-dbt-api-svc.core.svc.cluster.local:80" +DBT_API_UPLOAD_MANIFEST_URL = ( + "http://core-dbt-api-svc.core.svc.cluster.local:80/api/internal/manifests" +) + +# --------- More granular services resources ----------------- + +# --- AIRFLOW ---- +# if granted read action on 'workbench:airflow', it assumes 'viewer' +# if granted write action on 'workbench:airflow', it assumes 'admin' (all privileges) +SERVICE_AIRFLOW_SECURITY = ( + f"workbench:{SERVICE_AIRFLOW}:security" # write action -> 'admin' role +) +SERVICE_AIRFLOW_ADMIN = ( + f"workbench:{SERVICE_AIRFLOW}:admin" # write action -> 'op' role +) +SERVICE_AIRFLOW_SYS_ADMIN = ( + f"workbench:{SERVICE_AIRFLOW}:sysadmin" # write action -> 'sysadmin' role +) +SERVICE_AIRFLOW_DAGS = ( + f"workbench:{SERVICE_AIRFLOW}:dags" # write action -> 'user' role +) +WORKBENCH_RESOURCES.append(SERVICE_AIRFLOW_SECURITY) # resources under 'security' menu +WORKBENCH_RESOURCES.append(SERVICE_AIRFLOW_ADMIN) # resources under 'admin' menu +WORKBENCH_RESOURCES.append( + SERVICE_AIRFLOW_SYS_ADMIN +) # resources under mix 'admin' menu +WORKBENCH_RESOURCES.append(SERVICE_AIRFLOW_DAGS) # resources under 'browse' menu + +# --- SUPERSET ---- +# if granted read action on 'workbench:superset', it assumes 'Gamma' +# if granted write action on 'workbench:superset', it assumes 'Admin' (all privileges) +SERVICE_SUPERSET_SECURITY = ( + f"workbench:{SERVICE_SUPERSET}:security" # write action -> 'Admin' role +) +SERVICE_SUPERSET_DATA_SOURCES = ( + f"workbench:{SERVICE_SUPERSET}:data-sources" # write action -> 'Alpha' role +) +WORKBENCH_RESOURCES.append(SERVICE_SUPERSET_SECURITY) # resources under 'security' menu +WORKBENCH_RESOURCES.append(SERVICE_SUPERSET_DATA_SOURCES) # resources under 'data' menu + +# --- DATAHUB ---- +# if granted read action on 'workbench:datahub', it assumes 'viewer' +# if granted write action on 'workbench:datahub', it assumes 'admin' (all privileges) +SERVICE_DATAHUB_ADMIN = ( + f"workbench:{SERVICE_DATAHUB}:admin" # write action -> 'admin' role +) +SERVICE_DATAHUB_DATA = ( + f"workbench:{SERVICE_DATAHUB}:data" # write action -> 'editor' role + # read action -> 'viewer' role +) +WORKBENCH_RESOURCES.append(SERVICE_DATAHUB_ADMIN) +WORKBENCH_RESOURCES.append(SERVICE_DATAHUB_DATA) + + +ADMIN_GROUPS_RESOURCE = "admin:groups" +ADMIN_USERS_RESOURCE = "admin:users" +ADMIN_INVITATIONS_RESOURCE = "admin:invitations" +ADMIN_SECRETS_RESOURCE = "admin:secrets" + +SERVICE_GRAFANA_CONFIGURATION = f"services:{INTERNAL_SERVICE_GRAFANA}:configuration" +SERVICE_GRAFANA_DASHBOARDS = f"services:{INTERNAL_SERVICE_GRAFANA}:dashboards" + +IAM_RESOURCES = [ + ADMIN_GROUPS_RESOURCE, + ADMIN_USERS_RESOURCE, + ADMIN_INVITATIONS_RESOURCE, +] + +ACCOUNT_RESOURCES = [ + "admin:environments", + "admin:projects", + ADMIN_GROUPS_RESOURCE, + ADMIN_USERS_RESOURCE, + ADMIN_INVITATIONS_RESOURCE, + "admin:connectiontemplates", + "admin:connectiontypes", + "admin:servicecredentials", + "admin:servicesecrets", + "admin:billing", + "admin:integrations", + ADMIN_SECRETS_RESOURCE, + "admin:templates", + "admin:profiles", + f"services:{INTERNAL_SERVICE_GRAFANA}", + SERVICE_GRAFANA_CONFIGURATION, # Entire configuration menu + SERVICE_GRAFANA_DASHBOARDS, # Dashboards +] + +VALID_RESOURCES = WORKBENCH_RESOURCES + ACCOUNT_RESOURCES + +ACTION_READ = "read" +ACTION_WRITE = "write" + +VALID_ACTIONS = [ACTION_READ, ACTION_WRITE] + +DEFAULT_DOCKER_CONFIG = {} +docker_config = 
environ.get("DEFAULT_DOCKER_CONFIG") +if docker_config: + docker_config = base64.decodebytes(bytes(docker_config, "ascii")) + docker_config = json.loads(docker_config) + DEFAULT_DOCKER_CONFIG = docker_config + +# External services + +STRIPE_WEBHOOK_SECRET = environ.get("STRIPE_WEBHOOK_SECRET") +STRIPE_API_KEY = environ.get("STRIPE_API_KEY") +STRIPE_CUSTOMER_PORTAL = environ.get("STRIPE_CUSTOMER_PORTAL") +STRIPE_RETRY_TIMES = environ.get("STRIPE_RETRY_TIMES", 3) + +BILLING_ENABLED = STRIPE_API_KEY is not None + + +PROMETHEUS_API_URL = "http://prometheus-kube-prometheus-prometheus.prometheus.svc.cluster.local:9090/api/v1" + +# Invitations + +DEFAULT_FROM_EMAIL = environ.get("DEFAULT_FROM_EMAIL", "no-reply@datacoves.com") +INVITATION_EXPIRY_DAYS = 3 +INVITATION_SUCCESS_REDIRECT = LOGIN_URL +INVITATION_ERROR_URL = "/invitations/error" +INVITATION_MAX_ATTEMPTS = 3 +SETUP_REQUESTS_RECEIVER = "support@datacoves.com" + +# Email configuration + +SENDGRID_API_KEY = environ.get("SENDGRID_API_KEY") + +if SENDGRID_API_KEY: + EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend" + EMAIL_HOST = "smtp.sendgrid.net" + EMAIL_HOST_USER = "apikey" # this is exactly the value 'apikey' + EMAIL_HOST_PASSWORD = SENDGRID_API_KEY + EMAIL_PORT = 587 + EMAIL_USE_TLS = True +elif environ.get("EMAIL_HOST"): + EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend" + EMAIL_HOST = environ.get("EMAIL_HOST") + EMAIL_HOST_USER = environ.get("EMAIL_HOST_USER") + EMAIL_HOST_PASSWORD = environ.get("EMAIL_HOST_PASSWORD") + EMAIL_PORT = environ.get("EMAIL_PORT", 25) + EMAIL_USE_TLS = env_bool(key="EMAIL_USE_TLS") +else: + EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend" + +# Celery configuration + +CELERY_BROKER_URL = environ.get("CELERY_BROKER_URL") +CELERY_BROKER_HEARTBEAT = None +CELERY_BROKER_CONNECTION_TIMEOUT = 30 +CELERY_BROKER_CONNECTION_RETRY_ON_STARTUP = True +CELERY_RESULT_BACKEND = "django-db" +CELERY_EVENT_QUEUE_EXPIRES = 60 +CELERY_WORKER_PREFETCH_MULTIPLIER = 1 +CELERY_WORKER_CONCURRENCY = 3 +CELERY_TASK_IGNORE_RESULT = False + +CELERY_ENABLE_UTC = True +CELERY_TIMEZONE = "UTC" +CELERY_CREATE_MISSING_QUEUES = True +CELERY_TASK_DEFAULT_EXCHANGE = "default" +CELERY_TASK_DEFAULT_ROUTING_KEY = "default" +# https://stackoverflow.com/questions/32022401/changing-celery-default-queue-not-working-properly-as-expected +CELERY_TASK_DEFAULT_QUEUE = "api-main" +CELERY_TASK_ROUTES = { + "billing.*": {"queue": "api-long"}, +} + +CELERY_WORKER_CANCEL_LONG_RUNNING_TASKS_ON_CONNECTION_LOSS = True + +CELERY_ALWAYS_EAGER = False +CELERY_TASK_SERIALIZER = "json" +CELERY_ACCEPT_CONTENT = ["application/json"] +CELERYD_MAX_TASKS_PER_CHILD = 1000 +CELERY_EVENT_QUEUE_TTL = 10 +CELERY_SEND_EVENTS = True + +# Run all celery tasks locally when testing +CELERY_TASK_ALWAYS_EAGER = False +RUN_TASKS_SYNCHRONOUSLY = False + +DJANGO_CELERY_BEAT_TZ_AWARE = False + +# Verify your crontab syntax with https://crontab.guru. The crontab function +# defaults to "*" for unspecified arguments. +CELERY_BEAT_SCHEDULE = { + "turn_off_unused_workspaces": { + "task": "projects.tasks.turn_off_unused_workspaces", + "schedule": crontab(minute="*/15"), # Every 15 minute. + }, + "stop_sharing_codeservers": { + "task": "projects.tasks.stop_sharing_codeservers", + "schedule": crontab(minute="*/15"), # Every 15 minutes. + }, + "tally_resource_usage": { + "task": "billing.tasks.tally_resource_usage", + "schedule": crontab(minute="5", hour="0"), # At 00:05, every day. 
+ }, + "inform_billing_events": { + "task": "billing.tasks.inform_billing_events", + "schedule": crontab(minute="*/10"), # Every 10 minutes. + }, + "celery_heartbeat": { + "task": "clusters.tasks.celery_heartbeat", + "schedule": crontab(minute="*/5"), # Every 5 minutes. + }, + "clear_tokens": { + "task": "iam.tasks.clear_tokens", + "schedule": crontab(minute="10", hour="0"), # At 00:10, every day. + }, + "delete_unused_project_keys": { + "task": "projects.tasks.delete_unused_project_keys", + "schedule": crontab(minute="20", hour="0"), # At 00:20, every day. + }, + "delete_unused_user_keys": { + "task": "projects.tasks.delete_unused_user_keys", + "schedule": crontab(minute="23", hour="0"), # At 00:23, every day. + }, + "sync_groups": { + "task": "iam.tasks.remove_missing_user_groups", + "schedule": crontab(minute="25", hour="0"), # At 00:25, every day. + }, + "remove_unused_user_volumes": { + "task": "projects.tasks.remove_unused_user_volumes", + "schedule": crontab(minute="30", hour="0"), # At 00:30, every day. + }, + "remove_unused_environments": { + "task": "projects.tasks.remove_unused_environments", + "schedule": crontab(minute="35", hour="0"), # At 00:35, every day. + }, + "delete_cluster_alerts_older": { + "task": "clusters.tasks.delete_cluster_alerts_older", + "schedule": crontab(minute="40", hour="0"), # At 00:40, every day. + }, + "update_grafana_datasources": { + "task": "clusters.tasks.update_grafana_datasources", + "schedule": crontab(minute="*/10"), # Every 10 minutes. + }, + "remove_k8s_resources": { + "task": "clusters.tasks.remove_k8s_resources", + "schedule": crontab(minute="50", hour="0"), # At 00:50, every day. + }, + "deactivate_users": { + "task": "projects.tasks.deactivate_users", + "schedule": crontab(minute="55", hour="0"), # At 00:55, every day. + }, + "prometheus_metrics": { + "task": "clusters.tasks.prometheus_metrics", + "schedule": crontab(minute="*/2"), # Every 2 minutes. + }, +} + +# Tallies + +# When TALLY_START env var is not found, tallymarks are not generated +TALLY_START = ( + datetime.fromisoformat(environ["TALLY_START"] + " 00:00+00:00") + if environ.get("TALLY_START") + else None +) +TALLY_WINDOW = timedelta(days=31) + +TALLY_AIRFLOW_WORKERS_NAME = "airflow_workers_daily_running_time_seconds" +TALLY_AIRBYTE_WORKERS_NAME = "airbyte_workers_daily_running_time_seconds" + +# Sentry configuration + +SENTRY_DSN = environ.get("SENTRY_DSN") +DISABLE_SENTRY = env_bool("DISABLE_SENTRY") +if SENTRY_DSN and not DISABLE_SENTRY: + # Sentry + sentry_sdk.init( + dsn=SENTRY_DSN, + environment=BASE_DOMAIN, + release=environ["RELEASE"], + integrations=[DjangoIntegration()], + # Set traces_sample_rate to 1.0 to capture 100% + # of transactions for performance monitoring. + # We recommend adjusting this value in production. + traces_sample_rate=0.001, + # If you wish to associate users to errors (assuming you are using + # django.contrib.auth) you may enable sending PII data. + send_default_pii=True, + # To set a uniform sample rate + # Set profiles_sample_rate to 1.0 to profile 100% + # of sampled transactions. 
+ # We recommend adjusting this value in production, + profiles_sample_rate=0.01, + # Alternatively, to control sampling dynamically + # profiles_sampler=profiles_sampler + ) + sentry_sdk.set_tag("identity_provider", IDENTITY_PROVIDER) + sentry_sdk.set_tag("version", environ["VERSION"]) + +# OIDC server configuration + +OAUTH_OIDC_RSA_KEY = environ.get("OAUTH_OIDC_RSA_KEY", "").replace("\\n", "\n") + +OAUTH2_PROVIDER = { + "OIDC_ENABLED": True, + "OIDC_RSA_PRIVATE_KEY": OAUTH_OIDC_RSA_KEY, + "PKCE_REQUIRED": False, # FIXME: This should be True + "OAUTH2_VALIDATOR_CLASS": "iam.oauth_validators.CustomOAuth2Validator", + "SCOPES": { + "openid": "OpenID Connect scope", + "profile": "", + "email": "", + "groups": "Groups user belong to", + }, +} +AUTHENTICATION_BACKENDS.append("oauth2_provider.backends.OAuth2Backend") + +# Slack token +SLACK_BOT_TOKEN = environ.get("SLACK_BOT_TOKEN", "") +SLACK_BILLING_CHANNEL = environ.get("SLACK_BILLING_CHANNEL", "bot-billing-events") +SLACK_CLUSTER_CHANNEL = environ.get("SLACK_CLUSTER_CHANNEL", "bot-cluster-events") + + +# LDAP configuration +LDAP_HOST = environ.get("LDAP_HOST") +LDAP_USERNAME = environ.get("LDAP_USERNAME") +LDAP_PASSWORD = environ.get("LDAP_PASSWORD") +LDAP_FILTER_QUERY = environ.get("LDAP_FILTER_QUERY") +LDAP_BASE = environ.get("LDAP_BASE") + +# Logging config +LOGGING = { + "version": 1, + "disable_existing_loggers": False, + "formatters": { + "verbose": { + "format": "%(asctime)s [%(levelname)s] - {%(filename)s:%(lineno)d}: %(message)s", + }, + "simple": { + "format": "%(message)s", + }, + }, + "handlers": { + "console": { + "class": "logging.StreamHandler", + "formatter": "verbose", + }, + "cli": { + "class": "logging.StreamHandler", + "formatter": "simple", + }, + }, + "root": { + "handlers": ["console"], + "level": environ.get("DJANGO_LOG_LEVEL", "INFO"), + }, + "loggers": { + "django": { + "handlers": ["console"], + "level": "INFO", + "propagate": False, + }, + "clusters.config_loader": { + "handlers": ["cli"], + "level": "INFO", + "propagate": False, + }, + "projects.management": { + "handlers": ["cli"], + "level": "INFO", + "propagate": False, + }, + "projects.git": { + "handlers": ["console"], + "level": "INFO", + "propagate": False, + }, + }, +} + +# Prometheus metrics +# PROMETHEUS_METRIC_NAMESPACE = "datacoves" +PROMETHEUS_EXPORT_MIGRATIONS = False + +# CSV export feature +CSV_EXPORT_REFERENCE_DEPTH = 2 +CSV_EXPORT_ESCAPECHAR = "\\" diff --git a/src/core/api/app/datacoves/storage.py b/src/core/api/app/datacoves/storage.py new file mode 100644 index 00000000..81a8ff83 --- /dev/null +++ b/src/core/api/app/datacoves/storage.py @@ -0,0 +1,23 @@ +""" +This is a stub of a storage module which allows us to override the 'url' +command to route traffic to our S3 bucket based on cluster vesion. 
+""" + +from clusters.models import Cluster +from django.conf import settings +from django.contrib.staticfiles.storage import StaticFilesStorage + + +class S3(StaticFilesStorage): + def __init__(self, location=None, base_url=None, *args, **kwargs): + if base_url is None: + base_url = ( + f"https://{settings.STATIC_S3_BUCKET}.s3.amazonaws.com/" + + Cluster.objects.current() + .prefetch_related("release") + .first() + .release.name + + "/" + ) + + super().__init__(location, base_url, *args, **kwargs) diff --git a/src/core/api/app/datacoves/unit_tests_settings.py b/src/core/api/app/datacoves/unit_tests_settings.py new file mode 100644 index 00000000..01f58212 --- /dev/null +++ b/src/core/api/app/datacoves/unit_tests_settings.py @@ -0,0 +1,21 @@ +import os + +from datacoves.settings import * # noqa +from datacoves.settings import BASE_DIR + +CELERY_ALWAYS_EAGER = True + +SECRET_KEY = "test" + +FERNET_KEY = "R2CqeA2xncRDTxlepFZF22oJRnxdVmWfO1-pDXYJAS0=" + +BILLING_ENABLED = True + +BASE_DOMAIN = "datacoveslocal.com" + +DATABASES = { + "default": { + "ENGINE": "django.db.backends.sqlite3", + "NAME": os.path.join(BASE_DIR, "db.sqlite3"), + } +} diff --git a/src/core/api/app/datacoves/urls.py b/src/core/api/app/datacoves/urls.py new file mode 100644 index 00000000..2663240e --- /dev/null +++ b/src/core/api/app/datacoves/urls.py @@ -0,0 +1,306 @@ +"""datacoves URL Configuration + +The `urlpatterns` list routes URLs to views. For more information please see: + https://docs.djangoproject.com/en/3.2/topics/http/urls/ +Examples: +Function views + 1. Add an import: from my_app import views + 2. Add a URL to urlpatterns: path('', views.home, name='home') +Class-based views + 1. Add an import: from other_app.views import Home + 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') +Including another URLconf + 1. Import the include() function: from django.urls import include, path + 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls')) +""" + +from billing import views as billing_views +from clusters import views as cluster_views +from codegen import views as codegen_views +from credentials import views as credentials_views +from debug_toolbar.toolbar import debug_toolbar_urls +from django.conf import settings +from django.conf.urls.static import static +from django.contrib import admin +from django.shortcuts import redirect +from django.urls import include, path +from iam import views as iam_views +from integrations import views as integrations_views +from invitations import views as invitation_views +from projects import views as projects_views +from rest_framework_simplejwt.views import ( + TokenObtainPairView, + TokenRefreshView, + TokenVerifyView, +) +from users import views as users_views + +admin.site.site_header = "Datacoves" +admin.site.index_title = "Cluster administration" +admin.site.site_title = "Datacoves Administration" +# Disable user and pass form when using 3rd party auth +if "django.contrib.auth.backends.ModelBackend" in settings.AUTHENTICATION_BACKENDS: + admin.site.login_template = "admin/combined_login.html" +else: + admin.site.login_template = "admin/sso_login.html" + + +urlpatterns = [ + path("", include("django_prometheus.urls")), + path("grappelli/", include("grappelli.urls")), + # ------- Login / logout related endpoints ----------------------------- + path(r"healthcheck/", include("health_check.urls")), + path(r"healthz/", cluster_views.healthcheck), + # ------- Login / logout related endpoints ----------------------------- + path("", include("social_django.urls")), + path("api/token/", TokenObtainPairView.as_view(), name="token_jwt_obtain_pair"), + path("api/token/refresh/", TokenRefreshView.as_view(), name="token_jwr_refresh"), + path("api/token/verify/", TokenVerifyView.as_view(), name="token_jwt_verify"), + path("api/datacoves/verify/", iam_views.ValidateDatacovesToken.as_view()), + path("iam/login", iam_views.login, name="login"), + path("iam/login-error", iam_views.login_error), + path("iam/logout", iam_views.logout), + path("api/iam/accounts", iam_views.UserAccounts.as_view()), + path("api/iam/user-info", iam_views.UserInfo.as_view()), + # ------- Profile related endpoints ---------------------------- + path("api/iam/profile", iam_views.ProfileDetail.as_view()), + path("api/iam/profile/credentials", iam_views.ProfileCredentialList.as_view()), + path( + "api/iam/profile/credentials/", + iam_views.ProfileCredentialDetail.as_view(), + ), + path("api/iam/profile/ssh-keys", iam_views.ProfileSSHKeyList.as_view()), + path("api/iam/profile/ssh-keys/", iam_views.ProfileSSHKeyDetail.as_view()), + path("api/iam/profile/ssl-keys", iam_views.ProfileSSLKeyList.as_view()), + path("api/iam/profile/ssl-keys/", iam_views.ProfileSSLKeyDetail.as_view()), + path( + "invitations//accept", + invitation_views.AcceptInvite.as_view(), + name="accept-invite", + ), + path( + "api/iam/profile/user-environment//variables", + iam_views.UserEnvironmentVariablesDetail.as_view(), + ), + path("invitations/error", invitation_views.invite_error), + # ------- Admin User and groups related endpoints ---------------------------- + path("api/admin//groups", iam_views.GroupList.as_view()), + path( + "api/admin//groups/", + iam_views.GroupDetail.as_view(), + ), + path("api/admin//users", iam_views.UserList.as_view()), + path( + "api/admin//users/", + iam_views.UserDetail.as_view(), + ), + path( + "api/admin//permissions", + iam_views.AccountPermissionList.as_view(), + 
), + path("api/admin//settings", iam_views.AccountDetail.as_view()), + # ------- Admin invitations -------------------------------------------------- + path( + "api/admin//invitations", + invitation_views.InvitationList.as_view(), + ), + path( + "api/admin//invitations/", + invitation_views.InvitationDetail.as_view(), + ), + path( + "api/admin//invitations//resend", + invitation_views.ResendInvitation.as_view(), + ), + # ------- Admin Account and project related endpoints ------------------------ + path( + "api/admin//projects", + projects_views.ProjectList.as_view(), + ), + path( + "api/admin//projects/", + projects_views.ProjectDetail.as_view(), + ), + path( + "api/admin//projects//keys", + projects_views.ProjectKeys.as_view(), + ), + path( + "api/admin//projects//keys/", + projects_views.ProjectKeys.as_view(), + ), + path( + "api/admin/adapters/default-values", + projects_views.AdaptersDefaultValues.as_view(), + ), + path( + "api/admin//environments", + projects_views.EnvironmentList.as_view(), + ), + path( + "api/admin//environments/", + projects_views.EnvironmentDetail.as_view(), + ), + path( + "api/admin//environments//keys", + projects_views.EnvironmentKeys.as_view(), + ), + path( + "api/admin//environments//keys/", + projects_views.EnvironmentKeys.as_view(), + ), + path( + "api/admin//connectiontemplates", + projects_views.ConnectionTemplateList.as_view(), + ), + path( + "api/admin//connectiontemplates/", + projects_views.ConnectionTemplateDetail.as_view(), + ), + path( + "api/admin//integrations", + integrations_views.IntegrationList.as_view(), + ), + path( + "api/admin//templates", + codegen_views.TemplateList.as_view(), + ), + path( + "api/admin//integrations/", + integrations_views.IntegrationDetail.as_view(), + ), + path( + "api/admin//servicecredentials", + projects_views.ServiceCredentialList.as_view(), + ), + path( + "api/admin//servicecredentials/", + projects_views.ServiceCredentialDetail.as_view(), + ), + path( + "api/admin//servicesecrets", + projects_views.ServiceSecretList.as_view(), + ), + # ------- Accounts related endpoints (user belonging to account) ------------------------ + path( + "api/accounts//connectiontemplates", + projects_views.ProjectConnectionTemplateList.as_view(), + ), + path( + "api/accounts//connectiontypes", + projects_views.ConnectionTypeList.as_view(), + ), + # ------- Account Setup related endpoints ------------------------ + path( + "api/setup/test-db-connection", + projects_views.TestDbConnection.as_view(), + ), + path("api/setup/generate-ssh-key", projects_views.GenerateSSHKey.as_view()), + path("api/setup/generate-ssl-key", projects_views.GenerateSSLKey.as_view()), + path( + "api/setup/test-git-connection", + projects_views.TestGitConnection.as_view(), + ), + path( + "api/billing/subscribe", + billing_views.SubscribeAccount.as_view(), + ), + path( + "api/accounts/setup", + projects_views.AccountSetup.as_view(), + ), + path( + "api/webhook", + projects_views.AdmissionWebHookApiView.as_view(), + ), + path("api/setup/notify", users_views.NotifySetupRequest.as_view()), + path( + "api/setup//accept", + users_views.AcceptSetupRequest.as_view(), + name="accept-setup", + ), + path( + "api/setup//reject", + users_views.RejectSetupRequest.as_view(), + name="reject-setup", + ), + # ------- Workbench related endpoints ----------------------------------------- + path( + "api/workbench//status", + cluster_views.WorkbenchStatus.as_view(), + ), + path( + "api/workbench//heartbeat", + cluster_views.WorkbenchHeartbeat.as_view(), + ), + path( + 
"api/workbench//code-server/start-local-airflow", + cluster_views.WorkbenchCodeServerStartLocalAirflow.as_view(), + ), + path( + "api/workbench//code-server/restart", + cluster_views.WorkbenchCodeServerRestart.as_view(), + ), + path( + "api/workbench//code-server/settings", + cluster_views.WorkbenchCodeServerSettings.as_view(), + ), + path( + "api/alerts", + cluster_views.AlertView.as_view(), + ), + # ------- Credentials/Secrets related endpoints ----------------------------- + path( + "api/admin//secrets", credentials_views.SecretList.as_view() + ), + path( + "api/admin//secrets/", + credentials_views.SecretDetail.as_view(), + ), + path( + "api/v1/secrets/", + credentials_views.PublicSecretList.as_view(), + ), + path( + "api/v1/secret-push/", + projects_views.push_secrets_variable_to_airflow, + ), + path( + "api/v1/secrets-fetch/", + projects_views.TeamAirflowSecretFetchView.as_view(), + ), + # ------- Profiles related endpoints ----------------------------- + path("api/admin//profiles", projects_views.ProfileList.as_view()), + path( + "api/admin/profileimageset//done/", + projects_views.ProfileImageSetHook.as_view(), + ), + path( + "api/admin//profiles/", + projects_views.ProfileDetail.as_view(), + ), + # ------- Billing related endpoints ----------------------------------------- + path( + "api/billing/stripe", + billing_views.stripe_webhook, + ), + # ------- Admin panel -------------------------------------------------- + path("panel/docs/", include("django.contrib.admindocs.urls")), + path("panel/logout/", iam_views.logout), + path("panel/", admin.site.urls), + path("auth/", include("oauth2_provider.urls", namespace="oauth2_provider")), + # ------- Internal only endpoints ----------------------------------------- + path( + "api/v1/gitcallback/", + projects_views.dynamic_repo_credentials, + ), +] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + +if settings.DEBUG: + urlpatterns.append(path("", lambda request: redirect("/panel"))) + urlpatterns.append(path("grappelli-docs/", include("grappelli.urls_docs"))) + +if not settings.TESTING: + urlpatterns = [ + *urlpatterns, + ] + debug_toolbar_urls() diff --git a/src/core/api/app/datacoves/wsgi.py b/src/core/api/app/datacoves/wsgi.py new file mode 100644 index 00000000..5a6ef321 --- /dev/null +++ b/src/core/api/app/datacoves/wsgi.py @@ -0,0 +1,16 @@ +""" +WSGI config for datacoves project. + +It exposes the WSGI callable as a module-level variable named ``application``. 
+ +For more information on this file, see +https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ +""" + +import os + +from django.core.wsgi import get_wsgi_application + +os.environ.setdefault("DJANGO_SETTINGS_MODULE", "datacoves.settings") + +application = get_wsgi_application() diff --git a/src/core/api/app/db.sqlite3 b/src/core/api/app/db.sqlite3 new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/factories.py b/src/core/api/app/factories.py new file mode 100644 index 00000000..3893902d --- /dev/null +++ b/src/core/api/app/factories.py @@ -0,0 +1,242 @@ +from datetime import timedelta + +from billing.models import Plan, Product, Tally, TallyMark +from clusters.models import Cluster +from django.conf import settings +from django.utils import timezone +from factory import SubFactory +from factory.django import DjangoModelFactory +from invitations.models import Invitation +from projects.models import Environment, Project, Release, Repository, SSHKey +from users.models import Account, User + + +class UserFactory(DjangoModelFactory): + class Meta: + model = User + + email = "test@datacoveslocal.com" + + +price = { + "id": "price_1OIWiCLF8qmfSSrQyyfLuZFC", + "type": "recurring", + "created": 1701437096, + "product": "prod_MosppM3RQpT7a8", + "currency": "usd", + "livemode": False, + "metadata": {}, + "nickname": "standard", + "recurring": { + "interval": "month", + "usage_type": "metered", + "interval_count": 1, + "aggregate_usage": "sum", + "trial_period_days": None, + }, + "lookup_key": None, + "tiers_mode": None, + "unit_amount": 10, + "tax_behavior": "exclusive", + "billing_scheme": "per_unit", + "custom_unit_amount": None, + "transform_quantity": None, + "unit_amount_decimal": "10", +} + + +variants = [{"standard": {"items": [{"price": price}]}}] + + +items = [ + { + "id": "si_OkLIYCJTpYPSso", + "plan": { + "id": "price_1OIWiCLF8qmfSSrQyyfLuZFC", + "active": True, + "amount": 10, + "object": "plan", + "created": 1668718371, + "product": "prod_MosppM3RQpT7a8", + "currency": "usd", + "interval": "month", + "livemode": True, + "metadata": {}, + "nickname": "standard", + "tiers_mode": None, + "usage_type": "metered", + "amount_decimal": "10", + "billing_scheme": "per_unit", + "interval_count": 1, + "aggregate_usage": None, + "transform_usage": None, + "trial_period_days": None, + }, + "price": price, + } +] + +price = { + "id": "price_1OIWiCLF8qmfSSrQyyfLuZFC", + "type": "recurring", + "created": 1701437096, + "product": "prod_MosppM3RQpT7a8", + "currency": "usd", + "livemode": False, + "metadata": {}, + "nickname": "standard", + "recurring": { + "interval": "month", + "usage_type": "metered", + "interval_count": 1, + "aggregate_usage": "sum", + "trial_period_days": None, + }, + "lookup_key": None, + "tiers_mode": None, + "unit_amount": 10, + "tax_behavior": "exclusive", + "billing_scheme": "per_unit", + "custom_unit_amount": None, + "transform_quantity": None, + "unit_amount_decimal": "10", +} + + +variants = [{"standard": {"items": [{"price": price}]}}] + + +items = [ + { + "id": "si_OkLIYCJTpYPSso", + "plan": { + "id": "price_1OIWiCLF8qmfSSrQyyfLuZFC", + "active": True, + "amount": 10, + "object": "plan", + "created": 1668718371, + "product": "prod_MosppM3RQpT7a8", + "currency": "usd", + "interval": "month", + "livemode": True, + "metadata": {}, + "nickname": "standard", + "tiers_mode": None, + "usage_type": "metered", + "amount_decimal": "10", + "billing_scheme": "per_unit", + "interval_count": 1, + "aggregate_usage": None, + "transform_usage": None, + 
"trial_period_days": None, + }, + "price": price, + } +] + + +class ReleaseFactory(DjangoModelFactory): + class Meta: + model = Release + + name = "123" + commit = "123" + airbyte_chart = {"version": "0.48.8"} + airflow_chart = {"version": "1.7.0-dev"} + superset_chart = {"version": "0.10.6"} + released_at = timezone.now() + + +class ClusterFactory(DjangoModelFactory): + class Meta: + model = Cluster + + domain = "datacoveslocal.com" + kubernetes_version = "1.27" + release = SubFactory(ReleaseFactory) + + +class ProductFactory(DjangoModelFactory): + class Meta: + model = Product + + id = "prod_MosppM3RQpT7a8" + tally_name = "airflow_workers_daily_running_time_seconds" + + +class PlanFactory(DjangoModelFactory): + class Meta: + model = Plan + django_get_or_create = ("slug",) + + name = "test" + slug = "test" + kind = Plan.KIND_GROWTH + variants = variants + + +class AccountFactory(DjangoModelFactory): + class Meta: + model = Account + + name = "test" + created_by = None + + +class RepositoryFactory(DjangoModelFactory): + class Meta: + model = Repository + + git_url = "git@github.com:datacoves/balboa.git" + + +class SSHKeyFactory(DjangoModelFactory): + class Meta: + model = SSHKey + + private = "134" + public = "456" + + +class ProjectFactory(DjangoModelFactory): + class Meta: + model = Project + + name = ("test",) + account = SubFactory(AccountFactory) + repository = SubFactory(RepositoryFactory) + deploy_key = SubFactory(SSHKeyFactory) + validated_at = timezone.now() + + +class EnvironmentFactory(DjangoModelFactory): + class Meta: + model = Environment + + name = ("test",) + project = (SubFactory(ProjectFactory),) + services = ({},) + internal_services = ({},) + cluster = (SubFactory(ClusterFactory),) + sync = False + + +class TallyFactory(DjangoModelFactory): + class Meta: + model = Tally + + account = (SubFactory(AccountFactory),) + project = (SubFactory(ProjectFactory),) + environment = (SubFactory(EnvironmentFactory),) + name = (settings.TALLY_AIRFLOW_WORKERS_NAME,) + period = timedelta(days=1) + + +class TallyMarkFactory(DjangoModelFactory): + class Meta: + model = TallyMark + + +class InvitationFactory(DjangoModelFactory): + class Meta: + model = Invitation diff --git a/src/core/api/app/iam/__init__.py b/src/core/api/app/iam/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/iam/admin.py b/src/core/api/app/iam/admin.py new file mode 100644 index 00000000..4a8df6a2 --- /dev/null +++ b/src/core/api/app/iam/admin.py @@ -0,0 +1,24 @@ +from django.contrib import admin + +from datacoves.admin import BaseModelAdmin + +from .models import DatacovesToken + + +@admin.register(DatacovesToken) +class DatacovesTokenAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ( + "user", + "type", + "account", + "project", + "environment", + "created", + "expiry", + ) + + def account(self, obj): + if obj.type == DatacovesToken.TYPE_ENVIRONMENT: + return obj.environment.project.account + else: + return obj.project.account diff --git a/src/core/api/app/iam/apps.py b/src/core/api/app/iam/apps.py new file mode 100644 index 00000000..b639f33d --- /dev/null +++ b/src/core/api/app/iam/apps.py @@ -0,0 +1,5 @@ +from django.apps import AppConfig + + +class IAMConfig(AppConfig): + name = "iam" diff --git a/src/core/api/app/iam/auth_pipeline.py b/src/core/api/app/iam/auth_pipeline.py new file mode 100644 index 00000000..0107d265 --- /dev/null +++ b/src/core/api/app/iam/auth_pipeline.py @@ -0,0 +1,99 @@ +import logging + +from django.conf import settings +from 
social_core.exceptions import AuthMissingParameter +from users.models import ExtendedGroup, User + +logger = logging.getLogger(__name__) + + +def load_user(backend, details, response, *args, **kwargs): + """ + Gets user from DB by email, if doesn't exist creates it. + """ + + user_email = details.get("email") + + logger.info(f"load_user: {user_email}") + + if user_email is None: + logger.error("load_user: missing parameter 'email'") + raise AuthMissingParameter(backend, "email") + + user_name = details.get("fullname") + + logger.info(f"load_user: {user_name}") + + if user_name is None: + logger.error("load_user: missing parameter 'fullname'") + raise AuthMissingParameter(backend, "fullname") + + user, _ = User.objects.update_or_create( + email__iexact=user_email, + defaults={"name": user_name, "email": user_email, "deactivated_at": None}, + ) + + logger.info(f"load_user: {user_email} is user ID {user.id}") + + return {"user": user} + + +def add_to_group_by_role(strategy, backend, details, response, user, *args, **kwargs): + """ + Adds user to group based on identity provider's role/group name + """ + + group_claim = settings.IDP_GROUPS_CLAIM + + logger.info(details) + logger.info(response) + + if group_claim: + group_names = details.get("iam_groups") + if group_names is None: + logger.info("add_to_group_by_role: no iam_groups") + group_names = [] + elif type(group_names) is str: + logger.info(f"add_to_group_by_role: group is {group_names}") + group_names = [group_names] + else: + logger.info(f"add_to_group_by_role: group is {', '.join(group_names)}") + + # groups are removed only if a group claim was configured + _remove_missing_groups(user, group_names) + + if group_names: + for g_name in group_names: + extended_groups = ExtendedGroup.objects.filter( + identity_groups__contains=g_name + ).select_related("group") + for extended_group in extended_groups: + logger.info(f"add_to_group_by_role: matched eg {extended_group.id}") + + group = extended_group.group + + if group: + logger.info(f"add_to_group_by_role: adding group {group.id}") + + if not user.groups.filter(id=group.id).exists(): + user.groups.add(group) + + else: + logger.info( + "add_to_group_by_role: extended group wasn't mapped to a group" + ) + + +def _remove_missing_groups(user, group_names): + """ + Removes groups that are in the DB but not in the token + """ + # don't remove groups that are not tied to an IDP group (custom added groups) + qs = user.groups.exclude(extended_group__identity_groups=[]) + + for group_name in group_names: + qs = qs.exclude(extended_group__identity_groups__contains=group_name) + + for group in qs: + logger.info(f"Removing group {group} from user {user}") + user.groups.remove(group) diff --git a/src/core/api/app/iam/backends/__init__.py b/src/core/api/app/iam/backends/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/iam/backends/auth0.py b/src/core/api/app/iam/backends/auth0.py new file mode 100644 index 00000000..2f8ca88f --- /dev/null +++ b/src/core/api/app/iam/backends/auth0.py @@ -0,0 +1,19 @@ +from django.conf import settings +from social_core.backends.auth0 import Auth0OAuth2 as Auth0Base +from social_core.backends.auth0 import jwt + + +class Auth0OAuth2(Auth0Base): + def get_user_details(self, response): + detail = super().get_user_details(response) + id_token = response.get("id_token") + jwks = self.get_json(self.api_path(".well-known/jwks.json")) + issuer = self.api_path() + audience = self.setting("KEY") # CLIENT_ID + payload = jwt.decode( + id_token, 
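+            # Verify the ID token signature and claims against the tenant's JWKS,
+            # pinning the audience to this app's client_id and the issuer to the
+            # Auth0 domain, before any group claim in the payload is trusted.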
jwks, algorithms=["RS256"], audience=audience, issuer=issuer + ) + if settings.IDP_GROUPS_CLAIM: + # key is prefixed to avoid collition with user.groups + detail["iam_groups"] = payload.get(settings.IDP_GROUPS_CLAIM) + return detail diff --git a/src/core/api/app/iam/backends/azuread.py b/src/core/api/app/iam/backends/azuread.py new file mode 100644 index 00000000..9c4adcf3 --- /dev/null +++ b/src/core/api/app/iam/backends/azuread.py @@ -0,0 +1,13 @@ +from django.conf import settings +from social_core.backends.azuread_tenant import AzureADTenantOAuth2 as AzureBase + + +class AzureADTenantOAuth2(AzureBase): + def get_user_details(self, response): + details = super().get_user_details(response) + if not details["email"]: + details["email"] = response.get("email") + if settings.IDP_GROUPS_CLAIM: + # key is prefixed to avoid collition with user.groups + details["iam_groups"] = response.get(settings.IDP_GROUPS_CLAIM) + return details diff --git a/src/core/api/app/iam/backends/ping.py b/src/core/api/app/iam/backends/ping.py new file mode 100644 index 00000000..d4499eb4 --- /dev/null +++ b/src/core/api/app/iam/backends/ping.py @@ -0,0 +1,119 @@ +""" +Ping Federate OpenID Connect backend +""" + +from django.conf import settings +from jose import jwk, jwt +from jose.utils import base64url_decode +from requests.auth import HTTPBasicAuth +from social_core.backends.open_id_connect import OpenIdConnectAuth +from social_core.utils import handle_http_errors + + +class PingOneOpenIdConnect(OpenIdConnectAuth): + name = "ping_one" + OIDC_ENDPOINT = settings.SOCIAL_AUTH_PING_URL + REDIRECT_STATE = False + ACCESS_TOKEN_METHOD = "POST" + RESPONSE_TYPE = "code" + USERNAME_KEY = "preferred_username" + + def get_user_details(self, response): + username_key = self.setting("USERNAME_KEY", default=self.USERNAME_KEY) + fullname, first_name, last_name = self.get_user_names( + first_name=response.get("given_name"), last_name=response.get("family_name") + ) + detail = { + "username": response.get(username_key), + "email": response.get("email"), + "fullname": fullname, + "first_name": first_name, + "last_name": last_name, + } + if settings.IDP_GROUPS_CLAIM: + # key is prefixed to avoid collition with user.groups + detail["iam_groups"] = response.get(settings.IDP_GROUPS_CLAIM) + return detail + + # Monkey patched method to handle keys with missing "alg" by defaulting + # to RSA256. See https://github.com/python-social-auth/social-core/pull/661 + def find_valid_key(self, id_token): + kid = jwt.get_unverified_header(id_token).get("kid") + + keys = self.get_jwks_keys() + if kid is not None: + for key in keys: + if kid == key.get("kid"): + break + else: + keys = self.get_remote_jwks_keys() + + for key in keys: + if kid is None or kid == key.get("kid"): + if "alg" not in key: + key["alg"] = "RS256" + rsakey = jwk.construct(key) + message, encoded_sig = id_token.rsplit(".", 1) + decoded_sig = base64url_decode(encoded_sig.encode("utf-8")) + if rsakey.verify(message.encode("utf-8"), decoded_sig): + return key + return None + + +class PingFederateOpenIdConnect(PingOneOpenIdConnect): + name = "ping_federate" + + @handle_http_errors + def auth_complete(self, *args, **kwargs): + """Completes login process, must return user instance""" + state = self.validate_state() + self.process_error(self.data) + + params = self.auth_complete_params(state) + auth = None + + # Ping responds with bad request when these parameters are sent in the + # body and as auth headers. So we remove them from the body. 
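+        # The client credentials are supplied via HTTP Basic auth instead
+        # (see the HTTPBasicAuth instance built below), which Ping accepts.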
+ client_id, client_secret = self.get_key_and_secret() + if "client_id" in params: + del params["client_id"] + if "client_secret" in params: + del params["client_secret"] + auth = HTTPBasicAuth(client_id, client_secret) + + response = self.request_access_token( + self.access_token_url(), + data=params, + headers=self.auth_headers(), + auth=auth, + method=self.ACCESS_TOKEN_METHOD, + ) + + self.process_error(response) + return self.do_auth( + response["access_token"], response=response, *args, **kwargs + ) + + # Monkey patched method to handle keys with missing "alg" by defaulting + # to RSA256. See https://github.com/python-social-auth/social-core/pull/661 + def find_valid_key(self, id_token): + kid = jwt.get_unverified_header(id_token).get("kid") + + keys = self.get_jwks_keys() + if kid is not None: + for key in keys: + if kid == key.get("kid"): + break + else: + keys = self.get_remote_jwks_keys() + + for key in keys: + if kid is None or kid == key.get("kid"): + if "alg" not in key: + key["alg"] = "RS256" + rsakey = jwk.construct(key) + message, encoded_sig = id_token.rsplit(".", 1) + decoded_sig = base64url_decode(encoded_sig.encode("utf-8")) + if rsakey.verify(message.encode("utf-8"), decoded_sig): + return key + return None diff --git a/src/core/api/app/iam/migrations/0001_initial.py b/src/core/api/app/iam/migrations/0001_initial.py new file mode 100644 index 00000000..3360add7 --- /dev/null +++ b/src/core/api/app/iam/migrations/0001_initial.py @@ -0,0 +1,34 @@ +# Generated by Django 5.0.7 on 2025-01-28 22:22 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('projects', '0116_alter_environmentintegration_service_and_more'), + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ] + + operations = [ + migrations.CreateModel( + name='DatacovesToken', + fields=[ + ('digest', models.CharField(max_length=128, primary_key=True, serialize=False)), + ('token_key', models.CharField(db_index=True, max_length=25)), + ('created', models.DateTimeField(auto_now_add=True)), + ('expiry', models.DateTimeField(blank=True, null=True)), + ('type', models.SmallIntegerField(choices=[(1, 'Account'), (2, 'Environment')])), + ('environment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='datacoves_tokens', to='projects.environment')), + ('project', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='datacoves_tokens', to='projects.project')), + ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='auth_token_set', to=settings.AUTH_USER_MODEL)), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/src/core/api/app/iam/migrations/0002_alter_datacovestoken_type.py b/src/core/api/app/iam/migrations/0002_alter_datacovestoken_type.py new file mode 100644 index 00000000..3b5d9727 --- /dev/null +++ b/src/core/api/app/iam/migrations/0002_alter_datacovestoken_type.py @@ -0,0 +1,19 @@ +# Generated by Django 5.0.7 on 2025-02-11 04:23 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("iam", "0001_initial"), + ] + + operations = [ + migrations.AlterField( + model_name="datacovestoken", + name="type", + field=models.SmallIntegerField( + choices=[(1, "Project"), (2, "Environment")] + ), + ), + ] diff --git a/src/core/api/app/iam/migrations/0003_datacovestoken_is_system.py 
b/src/core/api/app/iam/migrations/0003_datacovestoken_is_system.py new file mode 100644 index 00000000..d2b282b2 --- /dev/null +++ b/src/core/api/app/iam/migrations/0003_datacovestoken_is_system.py @@ -0,0 +1,18 @@ +# Generated by Django 5.0.7 on 2025-04-10 17:27 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('iam', '0002_alter_datacovestoken_type'), + ] + + operations = [ + migrations.AddField( + model_name='datacovestoken', + name='is_system', + field=models.BooleanField(default=False), + ), + ] diff --git a/src/core/api/app/iam/migrations/__init__.py b/src/core/api/app/iam/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/iam/models/__init__.py b/src/core/api/app/iam/models/__init__.py new file mode 100644 index 00000000..82ea05d1 --- /dev/null +++ b/src/core/api/app/iam/models/__init__.py @@ -0,0 +1 @@ +from .token import * # noqa: F401,F403 diff --git a/src/core/api/app/iam/models/token.py b/src/core/api/app/iam/models/token.py new file mode 100644 index 00000000..5ecf4070 --- /dev/null +++ b/src/core/api/app/iam/models/token.py @@ -0,0 +1,37 @@ +from core.models import DatacovesModel +from django.db import models +from knox.models import AbstractAuthToken +from projects.models import Environment, Project + + +class DatacovesToken(AbstractAuthToken, DatacovesModel): + """Datacoves Tokens for Knox + + This is an extension of Knox to allow us to have environment or account + level tokens. They should be associated to a service account user. + """ + + TYPE_PROJECT = 1 + TYPE_ENVIRONMENT = 2 + + TYPES = ( + (TYPE_PROJECT, "Project"), + (TYPE_ENVIRONMENT, "Environment"), + ) + + type = models.SmallIntegerField(choices=TYPES, blank=False, null=False) + project = models.ForeignKey( + Project, + on_delete=models.CASCADE, + related_name="datacoves_tokens", + blank=True, + null=True, + ) + environment = models.ForeignKey( + Environment, + on_delete=models.CASCADE, + related_name="datacoves_tokens", + blank=True, + null=True, + ) + is_system = models.BooleanField(null=False, default=False) diff --git a/src/core/api/app/iam/oauth_validators.py b/src/core/api/app/iam/oauth_validators.py new file mode 100644 index 00000000..47ee4385 --- /dev/null +++ b/src/core/api/app/iam/oauth_validators.py @@ -0,0 +1,56 @@ +import re + +from clusters.adapters.all import EXTERNAL_ADAPTERS +from django.conf import settings +from oauth2_provider.oauth2_validators import OAuth2Validator +from projects.models import Environment + + +class CustomOAuth2Validator(OAuth2Validator): + oidc_claim_scope = None + # Set `oidc_claim_scope = None` to ignore scopes that limit which claims to return, + # otherwise the OIDC standard scopes are used. 
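+    # OAuth application names are expected to follow one of two conventions,
+    # parsed by the regex in get_additional_claims below:
+    #   "cluster-<service>"     e.g. "cluster-grafana"  (cluster-level client)
+    #   "<env_slug>-<service>"  e.g. "abc123-airflow"   (environment client, slug is 6 chars)
+    # The example names above are hypothetical and for illustration only.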
+ + def get_additional_claims(self, request): + permissions = [] + client_name: str = request.client.name + client_name_match = re.match( + r"^cluster-(?P.+)|(?P.{6})-(?P.+)$", + client_name, + ) + groups = [] + if client_name_match: + data = client_name_match.groupdict() + env_slug = data.get("env_slug") + env_service = data.get("env_service") + cluster_service = data.get("cluster_service") + if env_slug is not None and env_service is not None: + if env_service in settings.SERVICES: + # If it is a valid service, try and get allowed actions + env = Environment.objects.get(slug=env_slug) + permissions = request.user.service_resource_permissions( + env_service, env=env + ) + if env_service in EXTERNAL_ADAPTERS: + groups = EXTERNAL_ADAPTERS[env_service].get_oidc_groups( + env, request.user + ) + elif cluster_service is not None: + if cluster_service in settings.CLUSTER_SERVICES: + permissions = request.user.service_resource_permissions( + cluster_service + ) + # Groups used as accounts + groups = [act.slug for act in request.user.accounts] + if request.user.is_superuser: + groups.append("datacoves-main") + + return { + "given_name": request.user.name, + "family_name": request.user.name, + "name": request.user.name, + "preferred_username": request.user.email, + "email": request.user.email, + "permissions": list(permissions), + "groups": groups, + } diff --git a/src/core/api/app/iam/permissions.py b/src/core/api/app/iam/permissions.py new file mode 100644 index 00000000..60ff7bf7 --- /dev/null +++ b/src/core/api/app/iam/permissions.py @@ -0,0 +1,98 @@ +from clusters.request_utils import get_cluster +from rest_framework import permissions +from users.models import Account + + +class HasAccessToAccount(permissions.BasePermission): + message = "This user doesn't have access to this resource." + + def has_permission(self, request, view): + account_slug = view.kwargs.get("account_slug") + return any(map(lambda a: account_slug == a.slug, request.user.accounts.all())) + + +class HasResourcePermission(permissions.BasePermission): + message = "This user doesn't have the required permissions for this resource." 
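+    # Permission names are assembled as "<account_slug>|<kind>:<resource>|<action>",
+    # e.g. "acme|admin:users|write" (the account slug here is hypothetical).
+    # GET requests are allowed with either the read or the write permission;
+    # every other method requires the write permission.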
+ + def has_permission(self, request, view): + account_slug = view.kwargs.get("account_slug") + url = request.get_full_path().split("/") + kind = url[2] + resource = url[4].split("?")[0] + # resource = re.search(r"\/api\/([a-z]+)(?:\/?.*)(\?*)", url).group(1) + permissions = request.user.get_account_permissions(account_slug).all() + names = [ + f"{account_slug}|{kind}:{resource}|read", + f"{account_slug}|{kind}:{resource}|write", + ] + if request.method == "GET": + return any(map(lambda perm: perm.name in names, permissions)) + else: + return any(map(lambda perm: perm.name == names[1], permissions)) + + +class HasAccessToProject(permissions.BasePermission): + def has_permission(self, request, view): + project_slug = view.kwargs.get("project_slug") + return any(map(lambda p: project_slug == p.slug, request.user.projects)) + + +class IsAccountOwner(permissions.BasePermission): + message = "This user is not the owner of the specified Account" + + def has_object_permission(self, request, view, obj): + return obj.owned_by == request.user + + +class IsProfileDeletionEnabled(permissions.BasePermission): + message = "User profile deletion feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + if request.method == "DELETE": + return features["user_profile_delete_account"] + return True + + +class IsProfileChangeNameEnabled(permissions.BasePermission): + message = "User profile name change feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + if request.method == "PUT": + return features["user_profile_change_name"] + return True + + +class IsGroupsAdminEnabled(permissions.BasePermission): + message = "Groups admin feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + return features["admin_groups"] or features["admin_users"] + + +class IsUsersAdminEnabled(permissions.BasePermission): + message = "Users admin feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + return features["admin_users"] + + +class AccountIsNotOnTrial(permissions.BasePermission): + message = "This feature is not enabled on Trial accounts" + + def has_permission(self, request, view): + account_slug = view.kwargs.get("account_slug") + account = Account.objects.get(slug=account_slug) + return not account.is_on_trial + + +class AccountIsNotSuspended(permissions.BasePermission): + message = "This feature is not enabled if account is suspended." 
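+    # Resolves the account from the URL kwargs and checks its suspension
+    # status against the cluster serving this request.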
+ + def has_permission(self, request, view): + account_slug = view.kwargs.get("account_slug") + account = Account.objects.get(slug=account_slug) + return not account.is_suspended(get_cluster(request)) diff --git a/src/core/api/app/iam/serializers.py b/src/core/api/app/iam/serializers.py new file mode 100644 index 00000000..ce22127e --- /dev/null +++ b/src/core/api/app/iam/serializers.py @@ -0,0 +1,268 @@ +from django.contrib.auth.models import Group, Permission +from projects.cryptography import ED25519_KEY_TYPE +from projects.models import SSHKey, SSLKey, UserCredential +from projects.models.environment import Environment +from projects.models.project import Project +from projects.serializers import MinimalEnvironmentSerializer, MinimalProjectSerializer +from rest_framework import serializers +from rest_framework.exceptions import ValidationError +from rest_framework_simplejwt.serializers import TokenObtainPairSerializer +from users.models import Account, ExtendedGroup, User, parse_permission_name + + +class ExtendedGroupSerializer(serializers.ModelSerializer): + project = MinimalProjectSerializer(read_only=True) + project_id = serializers.IntegerField( + write_only=True, required=False, allow_null=True + ) + environment = MinimalEnvironmentSerializer(read_only=True) + environment_id = serializers.IntegerField( + write_only=True, required=False, allow_null=True + ) + + class Meta: + model = ExtendedGroup + fields = ( + "identity_groups", + "description", + "name", + "id", + "project", + "project_id", + "environment", + "environment_id", + ) + + +class PermissionSerializer(serializers.Serializer): + id = serializers.IntegerField(write_only=True) + + def to_representation(self, permission): + data = parse_permission_name(permission) + data["id"] = permission.id + data["account"] = data.pop("account_slug") + data["project"] = data.pop("project_slug") + data["environment"] = data.pop("environment_slug") + if Project.objects.filter(slug=data["project"]).exists(): + data["project_id"] = Project.objects.get(slug=data["project"]).id + if Environment.objects.filter(slug=data["environment"]).exists(): + data["environment_id"] = Environment.objects.get( + slug=data["environment"] + ).id + return data + + +class GroupSerializer(serializers.ModelSerializer): + extended_group = ExtendedGroupSerializer() + permissions = PermissionSerializer(many=True) + users_count = serializers.SerializerMethodField() + + class Meta: + model = Group + fields = ( + "name", + "permissions", + "extended_group", + "id", + "users_count", + ) + + def create(self, validated_data): + account_slug = self.context["account"] + permissions = validated_data.pop("permissions") + extended_group_data = validated_data.pop("extended_group") + validated_data["name"] = f"'{account_slug}' {validated_data['name']}" + new_group = Group.objects.create(**validated_data) + account = Account.objects.get(slug=self.context["account"]) + extended_group = ExtendedGroup(**extended_group_data) + extended_group.group = new_group + extended_group.account = account + extended_group.save() + for permission_data in permissions: + id = permission_data["id"] + try: + permission = Permission.objects.get(id=id) + except Permission.DoesNotExist: + raise ValidationError(f"Permission {id} does not exist") + if account_slug not in permission.name: + raise ValidationError(f"Permission {id} does not belong to account") + new_group.permissions.add(permission) + return new_group + + def update(self, instance, validated_data): + permissions = 
validated_data.pop("permissions") + extended_group_data = validated_data.pop("extended_group") + account_slug = self.context["account"] + ExtendedGroup.objects.filter(id=instance.extended_group.id).update( + **extended_group_data + ) + + if ( + len(permissions) < instance.permissions.count() + and instance.user_set.count() > 0 + ): + new_perms = [perm["id"] for perm in permissions] + current_perms = [perm.id for perm in instance.permissions.all()] + removed_perms = list(set(current_perms) - set(new_perms)) + + user_admin_perm = Account.get_users_admin_permissions(account_slug) + if user_admin_perm.first().id in removed_perms: + admin_groups = Group.objects.filter( + permissions__in=user_admin_perm, user__id__gt=0 + ).count() + if admin_groups == 1: + raise ValidationError( + "Please keep at least one group with users admin permission." + ) + + group_admin_perm = Account.get_groups_admin_permissions(account_slug) + if group_admin_perm.first().id in removed_perms: + admin_groups = Group.objects.filter( + permissions__in=group_admin_perm, user__id__gt=0 + ).count() + if admin_groups == 1: + raise ValidationError( + "Please keep at least one group with groups admin permission." + ) + + self._sync_permissions(instance, permissions) + return instance + + def _sync_permissions(self, instance, permissions): + """Syncs group permissions""" + for permission in instance.permissions.exclude( + id__in=[permission_data["id"] for permission_data in permissions] + ): + instance.permissions.remove(permission) + for permission_data in permissions: + id = permission_data["id"] + try: + permission = Permission.objects.get(id=id) + except Permission.DoesNotExist: + raise ValidationError(f"Permission {id} does not exist") + if self.context["account"] not in permission.name: + raise ValidationError(f"Permission {id} does not belong to account") + instance.permissions.add(permission) + + def get_users_count(self, obj): + return obj.user_set.count() + + +class ProfileSerializer(serializers.ModelSerializer): + class Meta: + model = User + fields = ("name",) + + +class UserCredentialSerializer(serializers.ModelSerializer): + class Meta: + model = UserCredential + fields = ( + "id", + "name", + "environment", + "connection_template", + "connection_overrides", + "ssl_key", + "validated_at", + ) + + def create(self, validated_data): + validated_data["user"] = self.context["request"].user + return super().create(validated_data) + + def update(self, instance, validated_data): + validated_data["user"] = self.context["request"].user + + if validated_data.get("ssl_key"): + if "password" in validated_data["connection_overrides"]: + del validated_data["connection_overrides"]["password"] + else: + # Setting password only if it has a value and was already set in the db + password = instance.connection_overrides.get("password") + if password is not None and not validated_data["connection_overrides"].get( + "password" + ): + validated_data["connection_overrides"]["password"] = password + + return super().update(instance, validated_data) + + def to_representation(self, instance): + rep = super().to_representation(instance) + if "password" in rep["connection_overrides"]: + del rep["connection_overrides"]["password"] + return rep + + +class UserSSHKeySerializer(serializers.ModelSerializer): + repos = serializers.SerializerMethodField() + public = serializers.ReadOnlyField() + + class Meta: + model = SSHKey + fields = ("id", "key_type", "public", "repos", "private") + extra_kwargs = { + "private": {"write_only": True, "required": 
False}, + } + + def create(self, validated_data): + user = self.context["request"].user + if user.ssh_keys.filter(usage=SSHKey.USAGE_USER).count() > 0: + # TODO: Remove this restriction + raise ValidationError("Users can't have more than one active SSH key.") + private = validated_data.get("private") + key_type = validated_data.get("key_type", ED25519_KEY_TYPE) + + try: + return SSHKey.objects.new( + created_by=user, + associate=True, + private=private, + key_type=key_type, + ) + except ValueError as ex: + raise ValidationError(ex) + + def get_repos(self, obj): + return [ + { + "id": repo["id"], + "url": repo["repository__git_url"], + "validated_at": repo["validated_at"], + } + for repo in obj.users.filter(user=self.context["request"].user).values( + "id", "repository__git_url", "validated_at" + ) + ] + + +class UserSSLKeySerializer(serializers.ModelSerializer): + public = serializers.ReadOnlyField() + + class Meta: + model = SSLKey + fields = ("id", "key_type", "public", "private") + extra_kwargs = { + "private": {"write_only": True, "required": False}, + } + + def create(self, validated_data): + user = self.context["request"].user + private = validated_data.get("private") + try: + return SSLKey.objects.new(created_by=user, private=private) + except ValueError as ex: + raise ValidationError(ex) + + +class MyTokenObtainPairSerializer(TokenObtainPairSerializer): + @classmethod + def get_token(cls, user: User): + token = super().get_token(user) + + # Add custom claims + token["name"] = user.name + token["email"] = user.email + token["permissions"] = list(user.permissions_names) + + return token diff --git a/src/core/api/app/iam/tasks.py b/src/core/api/app/iam/tasks.py new file mode 100644 index 00000000..10982873 --- /dev/null +++ b/src/core/api/app/iam/tasks.py @@ -0,0 +1,103 @@ +from collections import defaultdict + +from celery import shared_task +from django.conf import settings +from iam.auth_pipeline import _remove_missing_groups +from users.models import ExtendedGroup, User + +from datacoves.celery import app + + +@shared_task +def clear_tokens(): + from oauth2_provider.models import clear_expired + + clear_expired() + + +def _get_ldap_credentials(): + return { + "host": settings.LDAP_HOST, + "username": settings.LDAP_USERNAME, + "password": settings.LDAP_PASSWORD, + } + + +def get_ldap_group_users(host, username, password, group_name): + import ldap + + # prevent ssl verification + ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) + + l = ldap.initialize(host) + l.set_option(ldap.OPT_PROTOCOL_VERSION, ldap.VERSION3) + l.set_option(ldap.OPT_TIMELIMIT, 0) + l.set_option(ldap.OPT_DEREF, ldap.DEREF_NEVER) + l.set_option(ldap.OPT_X_TLS, ldap.OPT_X_TLS_DEMAND) + l.set_option(ldap.OPT_X_TLS_DEMAND, True) + l.set_option(ldap.OPT_DEBUG_LEVEL, 255) + + # login + l.bind(username, password) + l.result() + + filter_query = (settings.LDAP_FILTER_QUERY or "{group}").format(group=group_name) + query = l.search_ext( + settings.LDAP_BASE, + ldap.SCOPE_SUBTREE, + filter_query, + ["*"], + attrsonly=0, + timeout=-1, + sizelimit=0, + ) + response = l.result(query, all=1, timeout=-1) + user_mails = [] + for user in response[1]: + user_response = user[1] + user_mail = user_response.get("mail") + if isinstance(user_mail, list) and len(user_mail): + user_mail = user_mail[0] + if isinstance(user_mail, bytes): + user_mail = user_mail.decode() + user_mails.append(user_mail) + return user_mails + + +def _get_groups_name(): + group_names = [] + identity_groups_query = 
ExtendedGroup.objects.values_list( + "identity_groups", flat=True + ) + for identity_group in identity_groups_query: + if isinstance(identity_group, list): + group_names.extend(identity_group) + + # avoid duplicate groups in the list + group_names = list(set(group_names)) + return group_names + + +@app.task +def remove_missing_user_groups(): + credentials = _get_ldap_credentials() + host = credentials.get("host") + username = credentials.get("username") + password = credentials.get("password") + + if not host or not username or not password: + return + + group_names = _get_groups_name() + + # association is a dict like { : [ group1, group2 ], : [ group3 ] } + associations = defaultdict(list) + for group_name in group_names: + response = get_ldap_group_users(host, username, password, group_name) + for user in response: + associations[user.upper()].append(group_name) + for user in User.objects.exclude(deactivated_at__isnull=False).exclude( + is_superuser=True + ): + user_group_names = associations.get(user.email.upper(), []) + _remove_missing_groups(user, user_group_names) diff --git a/src/core/api/app/iam/urls.py b/src/core/api/app/iam/urls.py new file mode 100644 index 00000000..1b817dea --- /dev/null +++ b/src/core/api/app/iam/urls.py @@ -0,0 +1,11 @@ +from django.urls import path + +from . import views + +urlpatterns = [ + path("login", views.login), + path("logout", views.logout), + path("hello", views.hello), + path("me", views.me), + path("login-error", views.login_error), +] diff --git a/src/core/api/app/iam/views.py b/src/core/api/app/iam/views.py new file mode 100644 index 00000000..24bf2dfa --- /dev/null +++ b/src/core/api/app/iam/views.py @@ -0,0 +1,526 @@ +import json +from urllib import parse + +import billing.manager +from clusters.request_utils import get_cluster +from core.mixins.views import ( + AddAccountToContextMixin, + VerboseCreateModelMixin, + VerboseUpdateModelMixin, +) +from django.conf import settings +from django.contrib.auth import logout as django_logout +from django.contrib.auth.decorators import login_required +from django.contrib.auth.models import ContentType, Group, Permission +from django.db.models import Q +from django.http import HttpResponse +from django.shortcuts import redirect +from django.utils import timezone +from django.utils.datastructures import MultiValueDictKeyError +from django_filters.rest_framework import DjangoFilterBackend +from iam.permissions import ( + AccountIsNotOnTrial, + AccountIsNotSuspended, + HasResourcePermission, + IsGroupsAdminEnabled, + IsProfileChangeNameEnabled, + IsProfileDeletionEnabled, + IsUsersAdminEnabled, +) +from knox.auth import TokenAuthentication +from projects.models import SSHKey, SSLKey, UserCredential, UserEnvironment +from projects.serializers import UserEnvironmentVariablesSerializer +from rest_framework import filters, generics, status +from rest_framework.exceptions import NotFound, ValidationError +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response +from social_django.models import UserSocialAuth +from social_django.views import auth +from users.models import Account, ExtendedGroup, User +from users.serializers import AccountSerializer, UserInfoSerializer, UserSerializer + +from .permissions import HasAccessToAccount, IsAccountOwner +from .serializers import ( + GroupSerializer, + PermissionSerializer, + ProfileSerializer, + UserCredentialSerializer, + UserSSHKeySerializer, + UserSSLKeySerializer, +) + + +def login(request): + """Login handler 
redirected to social_django""" + return auth(request, settings.IDENTITY_PROVIDER) + + +@login_required +def logout(request): + return_to = request.GET.get("next", settings.LOGOUT_REDIRECT) + if settings.IDENTITY_PROVIDER == "auth0": + domain = settings.SOCIAL_AUTH_AUTH0_DOMAIN + client_id = settings.SOCIAL_AUTH_AUTH0_KEY + url = f"https://{domain}/v2/logout?" + parse.urlencode( + {"client_id": client_id, "returnTo": return_to} + ) + elif settings.IDENTITY_PROVIDER == "ping_federate": + url = return_to + elif settings.IDENTITY_PROVIDER == "azuread-tenant-oauth2": + social_user = UserSocialAuth.objects.filter( + user=request.user, provider=settings.IDENTITY_PROVIDER + ).first() + if social_user: + if type(social_user.extra_data) is str: + social_user.extra_data = json.loads(social_user.extra_data) + url = ( + "https://login.microsoftonline.com/" + + f"{settings.SOCIAL_AUTH_AZUREAD_TENANT_OAUTH2_TENANT_ID}" + + "/oauth2/v2.0/logout?" + + parse.urlencode( + { + "id_token_hint": social_user.extra_data["id_token"], + "post_logout_redirect_uri": return_to, + } + ) + ) + elif settings.IDENTITY_PROVIDER == "ping_one": + social_user = UserSocialAuth.objects.filter( + user=request.user, provider=settings.IDENTITY_PROVIDER + ).first() + if social_user: + if type(social_user.extra_data) is str: + social_user.extra_data = json.loads(social_user.extra_data) + client_id = settings.SOCIAL_AUTH_PING_KEY + url = f"{settings.SOCIAL_AUTH_PING_URL}/signoff?" + parse.urlencode( + { + "id_token_hint": social_user.extra_data["id_token"], + "post_logout_redirect_uri": return_to, + } + ) + response = redirect(url) + cluster = get_cluster(request) + for env in request.user.environments: + # Removing pomerium cookies + response.delete_cookie(f"_{env.slug}", domain=f".{cluster.domain}") + django_logout(request) + return response + + +def login_error(request): + msg = "Could not find user in database." 
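+    # Rendered at "iam/login-error" (see datacoves/urls.py), presumably when the
+    # social-auth pipeline cannot resolve a user (e.g. load_user raising
+    # AuthMissingParameter).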
+ return HttpResponse(msg) + + +class UserInfo(generics.RetrieveAPIView): + serializer_class = UserInfoSerializer + permission_classes = [IsAuthenticated] + + def get_object(self): + # if user was deactivated, we reactivate it + user = self.request.user + if user.deactivated_at: + user.deactivated_at = None + user.save() + return user + + def get_serializer_context(self): + context = super().get_serializer_context() + context.update( + { + "environment": self.request.GET.get("environment"), + "account": self.request.GET.get("account"), + }, + ) + return context + + +class UserAccounts(generics.ListAPIView): + serializer_class = AccountSerializer + permission_classes = [IsAuthenticated] + + def get_queryset(self): + return self.request.user.accounts.order_by("-created_at") + + +class GroupMixin: + serializer_class = GroupSerializer + permission_classes = [ + IsAuthenticated, + HasResourcePermission, + IsGroupsAdminEnabled, + AccountIsNotOnTrial, + AccountIsNotSuspended, + ] + + def get_queryset(self): + return ( + Group.objects.filter( + extended_group__account__slug=self.kwargs.get("account_slug") + ) + .exclude(extended_group__role=ExtendedGroup.Role.ROLE_DEFAULT) + .order_by("name") + ) + + +class GroupList( + GroupMixin, + VerboseCreateModelMixin, + AddAccountToContextMixin, + generics.ListCreateAPIView, +): + filter_backends = [filters.SearchFilter] + search_fields = [ + "extended_group__name", + "extended_group__environment__name", + "extended_group__environment__slug", + "extended_group__project__name", + ] + + +class GroupDetail( + GroupMixin, + VerboseUpdateModelMixin, + AddAccountToContextMixin, + generics.RetrieveUpdateDestroyAPIView, +): + def destroy(self, request, *args, **kwargs): + instance = self.get_object() + if instance.user_set.count() > 0: + # If group has users, can't be deleted + return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED) + else: + self.perform_destroy(instance) + return Response(status=status.HTTP_204_NO_CONTENT) + + +def get_users_queryset(slug): + # Filtering out superusers so we can add superusers stealthy to accounts to debug issues + return ( + User.objects.exclude(deactivated_at__isnull=False) + .exclude(is_superuser=True) + .filter(groups__extended_group__account__slug=slug) + .order_by("name") + .distinct() + ) + + +class UserMixin: + serializer_class = UserSerializer + permission_classes = [ + IsAuthenticated, + HasResourcePermission, + IsUsersAdminEnabled, + AccountIsNotOnTrial, + AccountIsNotSuspended, + ] + + def get_queryset(self): + return get_users_queryset(self.kwargs.get("account_slug")) + + +class UserList(UserMixin, AddAccountToContextMixin, generics.ListAPIView): + filter_backends = [filters.SearchFilter, DjangoFilterBackend] + search_fields = ["name", "email"] + filterset_fields = ["groups"] + + +class UserDetail( + UserMixin, + VerboseUpdateModelMixin, + AddAccountToContextMixin, + generics.RetrieveUpdateDestroyAPIView, +): + def destroy(self, request, *args, **kwargs): + instance = self.get_object() + if instance == request.user: + # We don't allow deleting self + return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED) + else: + try: + account = Account.objects.get(slug=self.kwargs.get("account_slug")) + except MultiValueDictKeyError: + raise ValidationError("Missing account parameter.") + except Account.DoesNotExist: + raise NotFound("Account not found.") + admin_users = list(Account.get_admin_users(self.kwargs.get("account_slug"))) + if instance in admin_users and len(admin_users) == 1: + # We can't remove the last admin + 
return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED) + else: + groups = Group.objects.filter(extended_group__account=account) + for group in groups: + instance.groups.remove(group) + return Response(status=status.HTTP_204_NO_CONTENT) + + +class AccountPermissionList(generics.ListAPIView): + serializer_class = PermissionSerializer + permission_classes = [IsAuthenticated, HasAccessToAccount] + + def get_queryset(self): + account_slug = self.kwargs.get("account_slug") + project_slug = self.request.GET.get("project", "") + + if project_slug: + project_slug = ":" + project_slug + + name = f"{account_slug}{project_slug}" + content_type = ContentType.objects.get(app_label="users", model="account") + return Permission.objects.filter(content_type=content_type).filter( + Q(name__startswith=name + ":") | Q(name__startswith=name + "|") + ) + + +class ProfileDetail(VerboseUpdateModelMixin, generics.RetrieveUpdateDestroyAPIView): + serializer_class = ProfileSerializer + permission_classes = [ + IsAuthenticated, + IsProfileDeletionEnabled, + IsProfileChangeNameEnabled, + ] + + def get_object(self): + return self.request.user + + def perform_destroy(self, instance): + # FIXME: Users deactivation disabled since users that come back to the platform + # are not able to re-activate their users + # instance.deactivated_at = timezone.now() + # instance.save() + pass + + +class ProfileSSHKeyMixin: + serializer_class = UserSSHKeySerializer + permission_classes = [IsAuthenticated] + + def get_queryset(self): + # prefetch related to return user repositories + return SSHKey.objects.prefetch_related("users").filter( + created_by=self.request.user, usage=SSHKey.USAGE_USER + ) + + +class ProfileSSHKeyList( + ProfileSSHKeyMixin, VerboseCreateModelMixin, generics.ListCreateAPIView +): + """ + List user SSH Keys + """ + + pass + + +class ProfileSSHKeyDetail(ProfileSSHKeyMixin, generics.DestroyAPIView): + """ + Delete user SSH Keys + """ + + pass + + +class ProfileSSLKeyMixin: + serializer_class = UserSSLKeySerializer + permission_classes = [IsAuthenticated] + + def get_queryset(self): + return SSLKey.objects.filter( + created_by=self.request.user, usage=SSLKey.USAGE_USER + ) + + +class ProfileSSLKeyList( + ProfileSSLKeyMixin, VerboseCreateModelMixin, generics.ListCreateAPIView +): + """ + List user SSL Keys + """ + + pass + + +class ProfileSSLKeyDetail(ProfileSSLKeyMixin, generics.DestroyAPIView): + """ + Delete user SSL Keys + """ + + pass + + +class ProfileCredentialMixin: + def get_queryset(self): + return UserCredential.objects.filter(user=self.request.user) + + def get_integrity_exception_message(self, ex, data): + message = str(ex) + if "User credential uniqueness" in message: + return f"Connection name '{data['name']}' can not be reused, please choose a new one." 
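+        # Any other integrity error is surfaced to the client unchanged.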
+ else: + return message + + +class ProfileCredentialList( + ProfileCredentialMixin, VerboseCreateModelMixin, generics.ListCreateAPIView +): + """ + List all/environment's UserCredential or create a new instance of it + """ + + serializer_class = UserCredentialSerializer + permission_classes = [IsAuthenticated] + filter_backends = [DjangoFilterBackend] + filterset_fields = ["environment"] + + +class ProfileCredentialDetail( + ProfileCredentialMixin, + VerboseUpdateModelMixin, + generics.RetrieveUpdateDestroyAPIView, +): + """ + Get, update or delete an individual UserCredential + """ + + serializer_class = UserCredentialSerializer + permission_classes = [IsAuthenticated] + + +class AccountDetail(VerboseUpdateModelMixin, generics.RetrieveUpdateDestroyAPIView): + serializer_class = AccountSerializer + permission_classes = [ + IsAuthenticated, + IsAccountOwner, + ] + lookup_field = "slug" + lookup_url_kwarg = "account_slug" + + def get_queryset(self): + return Account.objects.filter(created_by=self.request.user) + + def perform_destroy(self, instance): + billing.manager.cancel_subscription(instance) + instance.deactivated_at = timezone.now() + instance.save() + + +class UserEnvironmentVariablesDetail(VerboseUpdateModelMixin, generics.UpdateAPIView): + serializer_class = UserEnvironmentVariablesSerializer + permission_classes = [IsAuthenticated] + + def get_queryset(self): + return UserEnvironment.objects.filter(user=self.request.user) + + +class ValidateDatacovesToken(generics.ListAPIView): + """ + This validates a datacoves token + """ + + authentication_classes = [TokenAuthentication] + permission_classes = [IsAuthenticated] + + def get_queryset(self): + """Just to make sure this works with django rest framework; we are + going to ignore the queryset results since what we want is in request + """ + return User.objects.none() + + def get(self, request, type=None, id_or_slug=None, *args, **kwargs): + """This only checks for things we have "write" permission to. + It also doesn't quite work right with just account level permissions, + but we don't do that so that's okay for now. I'm not even sure how + to define it because our permissions don't really work like that. + """ + + permissions = list(request.user.permissions_names) + projects = list( + request.user.projects.all() + .only("id", "slug", "account__id", "account__slug") + .prefetch_related("account") + ) + accounts = list(request.user.accounts.all().only("id", "slug")) + environments = list( + request.user.environments.all() + .only( + "id", + "slug", + "project__id", + "project__slug", + "project__account__id", + "project__account__slug", + ) + .prefetch_related("project", "project__account") + ) + + # Build a permissions map for crunching permissions for each + # environment. + account_perms = {} + project_perms = {} + environment_perms = {} + + for perm in permissions: + parts = perm.split("|") + + # Only care about write perms + if not parts or parts[-1] != "write": + continue + + # what do we have for account, etc: + level = parts[0].split(":") + + if len(level) == 1: # account level + account_perms[level[0]] = True + + elif len(level) == 2: # project level + project_perms[level[1]] = True + + elif len(level) == 3: # environment level + environment_perms[level[2]] = True + + # Now let's return the permissions the user has write access to. 
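+        # Permission names follow "<account>[:<project>[:<environment>]]|<action>",
+        # e.g. "acme|write", "acme:analytics|write", "acme:analytics:dev|write"
+        # (illustrative slugs). A project is kept when the user has write access at its
+        # account, at the project itself, or at any of its environments; environments
+        # and accounts are filtered the same way below.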
+ filtered_projects = [ + x + for x in projects + if x.account.slug in account_perms + or x.slug in project_perms + or any( + [ + x.slug == y.project.slug + for y in environments + if y.slug in environment_perms + ] + ) + ] + + filtered_environments = [ + x + for x in environments + if x.account.slug in account_perms + or x.project.slug in project_perms + or x.slug in environment_perms + ] + + filtered_accounts = [ + x + for x in accounts + if x.slug in account_perms + or filtered_projects + and any([y.account.slug == x.slug for y in projects]) + or filtered_environments + and any([y.project.account.slug == x.slug for y in environments]) + ] + + return Response( + { + "email": request.user.email, + "permissions": permissions, + "name": request.user.name, + "account_ids": [x.id for x in filtered_accounts], + "accounts": [x.slug for x in filtered_accounts], + "project_ids": [x.id for x in filtered_projects], + "projects": [x.slug for x in filtered_projects], + "environment_ids": [x.id for x in filtered_environments], + "environments": [x.slug for x in filtered_environments], + } + ) diff --git a/src/core/api/app/integration_tests/base_test.py b/src/core/api/app/integration_tests/base_test.py new file mode 100644 index 00000000..4ae76d51 --- /dev/null +++ b/src/core/api/app/integration_tests/base_test.py @@ -0,0 +1,545 @@ +import asyncio +import gc +import os +import shutil +import traceback +from itertools import count +from pathlib import Path +from typing import List, Tuple +from uuid import uuid4 + +from billing.models import Plan +from channels.db import database_sync_to_async +from channels.testing import ChannelsLiveServerTestCase +from clusters import workspace +from clusters.config_loader.cluster import ClusterConfigLoader +from clusters.config_loader.environment import EnvironmentConfigLoader +from clusters.models import Cluster +from codegen.models import SQLHook, Template +from daphne.server import Server +from daphne.testing import DaphneProcess, _reinstall_reactor +from django.db import connection +from integration_tests import utils_test +from playwright.async_api import ConsoleMessage, Page, async_playwright +from projects.management.commands.load_releases import load_releases +from projects.models import ( + ConnectionTemplate, + ConnectionType, + Environment, + Profile, + Project, + Release, +) +from twisted.internet import reactor +from users.models import Group, User + +RECORD_VIDEO = False + + +class DaphneProcessDatacoves(DaphneProcess): + def __init__(self, host, get_application, kwargs=None, setup=None, teardown=None): + super().__init__("0.0.0.0", get_application, kwargs, setup, teardown) + + def run(self): + # OK, now we are in a forked child process, and want to use the reactor. + # However, FreeBSD systems like MacOS do not fork the underlying Kqueue, + # which asyncio (hence asyncioreactor) is built on. + # Therefore, we should uninstall the broken reactor and install a new one. 
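+        # This override of daphne's DaphneProcess.run() binds the test server to the
+        # host passed in __init__ ("0.0.0.0") on port 8000 and cleans up the reactor
+        # once the server stops.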
+ + from daphne.endpoints import build_endpoint_description_strings + + _reinstall_reactor() + + application = self.get_application() + + try: + # Create the server class + endpoints = build_endpoint_description_strings(host=self.host, port=8000) + self.server = Server( + application=application, + endpoints=endpoints, + signal_handlers=False, + **self.kwargs, + ) + # Set up a poller to look for the port + reactor.callLater(0.1, self.resolve_port) + # Run with setup/teardown + if self.setup is not None: + self.setup() + try: + self.server.run() + finally: + if self.teardown is not None: + self.teardown() + self._cleanup_reactor() + + except BaseException as e: + # Put the error on our queue so the parent gets it + self.errors.put((e, traceback.format_exc())) + + def _cleanup_reactor(self): + try: + # Forzar la limpieza del reactor + if reactor.running: + reactor.stop() + for delayed in reactor.getDelayedCalls(): + if delayed.active(): + delayed.cancel() + except Exception: + pass + + +class ChannelsLiveServerTestCaseDatacoves(ChannelsLiveServerTestCase): + ProtocolServerProcess = DaphneProcessDatacoves + + @classmethod + def tearDownClass(cls): + try: + # Force cleanup of server processes + if hasattr(cls, "server_thread") and cls.server_thread is not None: + cls.server_thread.terminate() + cls.server_thread.join(timeout=1) + except Exception: + pass + super().tearDownClass() + + +class PlaywrightChannelsTestCase(ChannelsLiveServerTestCaseDatacoves): + DEFAULT_TEST_DELAY = 2.0 + + def __init__(self, methodName="runTest"): + super().__init__(methodName) + self._domain = "datacoveslocal.com" + self._ssh_key_title: str = None + self._subpath = None + self._screenshot_cont = count(1) + self._page: Page = None + self._browser_console = [] + self._context = None + self._browser = None + self._playwright = None + self.test_delay = float(os.getenv("TEST_DELAY", self.DEFAULT_TEST_DELAY)) + + async def asyncSetUp(self): + """Async setup method to initialize browser context""" + await self._wait_between_tests() + await self._ensure_clean_browser_state() + await self.set_browser_context() + + async def _wait_between_tests(self): + """Wait between tests with the configured time""" + delay = getattr(self, "test_delay", self.DEFAULT_TEST_DELAY) + await asyncio.sleep(delay) + + async def _ensure_clean_browser_state(self): + """Ensure the browser state is clean before starting""" + await self.teardown_browser() + gc.collect() + await asyncio.sleep(0.5) + + async def asyncTearDown(self): + """Async teardown method to clean up browser resources""" + if self._page and RECORD_VIDEO: + await self.save_video() + + await self.teardown_browser() + self._page = None + self._context = None + self._browser = None + self._playwright = None + + gc.collect() + await asyncio.sleep(0.5) + + def tearDown(self): + """Synchronous teardown for database cleanup""" + try: + envs_deleted, details = Environment.objects.all().delete() + print(f"Environments deleted: {envs_deleted}") + print(f"Details: {details}") + + if self._ssh_key_title: + utils_test.github_ssh_delete_all_by_title(self._ssh_key_title) + + """ + Workaround to fix: + psycopg2.errors.FeatureNotSupported: cannot truncate a table referenced in a foreign key constraint + DETAIL: Table "knox_authtoken" references "users_user". + HINT: Truncate table "knox_authtoken" at the same time, or use TRUNCATE ... CASCADE. 
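+            The DO block below looks up the foreign key constraint on knox_authtoken
+            and drops it (if it exists) so the subsequent TRUNCATE of users_user can run.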
+ """ + with connection.cursor() as cursor: + cursor.execute( + """ + DO $$ + DECLARE constraint_name TEXT; + BEGIN + SELECT conname INTO constraint_name + FROM pg_constraint + WHERE conrelid = 'knox_authtoken'::regclass + AND contype = 'f'; + + IF constraint_name IS NOT NULL THEN -- Check if the constraint exists + EXECUTE format('ALTER TABLE knox_authtoken DROP CONSTRAINT %I', constraint_name); + END IF; + END $$; + """ + ) + + finally: + super().tearDown() + + async def teardown_browser(self): + """Clean up browser resources""" + try: + if self._context: + await self._context.close() + print("Context closed.") + if self._browser: + await self._browser.close() + print("Browser closed.") + if self._playwright: + await self._playwright.stop() + print("Playwright stopped.") + except Exception as e: + print(f"Error during browser teardown: {e}") + + async def set_browser_context(self): + """Initialize browser context with proper configuration""" + try: + self._playwright = await async_playwright().start() + self._browser = await self._playwright.firefox.launch( + headless=True, args=["--no-sandbox", "--disable-dev-shm-usage"] + ) + + video_config = ( + { + "record_video_dir": f"integration_tests/output/{self._subpath}/video", + "record_video_size": {"width": 1280, "height": 720}, + } + if RECORD_VIDEO + else {} + ) + + self._context = await self._browser.new_context( + ignore_https_errors=True, + viewport={"width": 1280, "height": 720}, + **video_config, + ) + + self._page = await self._context.new_page() + self._page.set_default_timeout(utils_test.DEFAULT_TIMEOUT) + self._page.on( + "console", + lambda msg: asyncio.create_task(self.handle_console_message(msg)), + ) + except Exception as e: + print(f"Error during browser setup: {e}") + await self.teardown_browser() + raise + + async def save_video(self): + """Save recorded video if enabled""" + try: + if hasattr(self._page, "video") and self._page.video: + video_path = await self._page.video.path() + new_path = f"integration_tests/output/{self._subpath}/video.webm" + shutil.move(video_path, new_path) + except Exception as e: + print(f"Error saving video: {e}") + + @property + def page(self) -> Page: + return self._page + + @property + def domain(self) -> str: + return self._domain + + @property + def get_release(self) -> str: + """Get release number from cluster-params.yaml""" + return os.getenv("RELEASE") + + @property + def ssh_key_title(self) -> str: + if self._ssh_key_title is None: + self._ssh_key_title = str(uuid4()) + return self._ssh_key_title + + def get_timeout(self, minutes) -> int: + return minutes * utils_test.ONE_MINUTE_IN_MS + + async def handle_console_message(self, msg: ConsoleMessage): + if "JSHandle@object" in msg.text: + args_values = [await arg.evaluate("arg => arg") for arg in msg.args] + text = " ".join(map(str, args_values)) + else: + text = msg.text + + self._browser_console.append(f"[{msg.type}] {text}") + + async def dump_browser_console(self): + dump_path = Path(f"integration_tests/output/{self._subpath}/logs/") + dump_path.mkdir(parents=True, exist_ok=True) + filename = "browser_console.txt" + dump_path = dump_path / filename + + with open(dump_path, "w") as f: + f.writelines( + [ + line if line.endswith("\n") else line + "\n" + for line in self._browser_console + ] + ) + + async def dump_pod_logs(self, pods: List[Tuple[str, str]] = []): + await self.screenshot() + await self.dump_browser_console() + await utils_test.dump_pod_status(self._subpath) + for pod in pods: + pod_name = pod[0] + container_name = pod[1] 
if len(pod) == 2 else None + await utils_test.dump_pod_logs( + self._subpath, pod_name=pod_name, container=container_name + ) + + if RECORD_VIDEO: + # Close the context before getting the video + await self._context.close() + await self.save_video() + + async def screenshot(self, delay=1, full_page=False) -> str: + """Gets screenshots path to storage screenshots""" + await asyncio.sleep(delay) + sufix = next(self._screenshot_cont) + path = f"integration_tests/output/{self._subpath}/screenshots/screenshot_{sufix}.png" + await self._page.screenshot(path=path, full_page=full_page) + + async def clean_toast(self): + await self._page.get_by_role("button", name="Close").first.click() + + async def user_session(self): + password = "testing123" + + @database_sync_to_async + def create_user(): + user = User.objects.create_user( + email="john@datacoves.com", + password=password, + name="John", + setup_enabled=True, + ) + user.is_superuser = True + user.save() + return user + + user = await create_user() + print("User created:", user.email) + self.user_session = {"user": user, "password": password} + return self.user_session + + async def cluster_setup(self, user): + @database_sync_to_async + def create_cluster(): + utils_test.check_namespace_terminated() + + load_releases() + cluster_params = { + "domain": self._domain, + "context": "kind-datacoves-cluster", + "provider": "kind", + "kubernetes_version": "1.35.0", + "release": self.get_release, + "account": { + "name": "Local", + "slug": "local", + "owner": {"email": "hey@datacoves.com", "name": "Datacoves Admin"}, + }, + "projects": { + "analytics": { + "name": "Analytics", + "slug": "analytics-local", + "clone_strategy": "http_clone", + "repository": { + "url": "https://github.com/datacoves/balboa.git", + "git_url": "git@github.com:datacoves/balboa.git", + }, + "groups": { + "admins": ["ADMIN-TEST"], + "developers": ["DEVELOPER-TEST"], + "viewers": ["VIEWER-TEST"], + }, + } + }, + "features_enabled": { + "admin_users": True, + "admin_groups": True, + "admin_account": True, + "admin_billing": True, + "admin_projects": True, + "accounts_signup": False, + "admin_invitations": True, + "admin_environments": True, + "admin_connections": True, + "admin_integrations": True, + "admin_secrets": True, + "admin_service_credentials": True, + "user_profile_change_name": True, + "user_profile_delete_account": True, + "user_profile_change_credentials": True, + "user_profile_change_ssh_keys": True, + "user_profile_change_ssl_keys": True, + "codeserver_restart": True, + }, + } + + cluster = ClusterConfigLoader.load(params=cluster_params) + + env_config = { + "name": "Development", + "project": "analytics-local", + "type": "dev", + "release": self.get_release, + "services": { + "airbyte": {"enabled": False}, + "airflow": {"enabled": False}, + "code-server": {"enabled": True}, + "dbt-docs": {"enabled": False}, + "superset": {"enabled": False}, + }, + "dbt_home_path": "transform", + "dbt_profiles_dir": "automate", + } + + env = EnvironmentConfigLoader.load( + env_slug=utils_test.ENVIRONMENT_NAME, + env_config=env_config, + run_async=False, + ) + + airflow_config = { + "settings": { + "webserver_master_timeout": 300, + }, + "override_values": { + "webserver": { + "livenessProbe": { + "initialDelaySeconds": 300, + "periodSeconds": 10, + "timeoutSeconds": 30, + "failureThreshold": 30, + }, + "startupProbe": { + "failureThreshold": 30, + "periodSeconds": 10, + "timeoutSeconds": 20, + }, + }, + }, + } + + env.airflow_config.update(airflow_config) + 
Environment.objects.filter(id=env.id).update( + airflow_config=env.airflow_config + ) + + user.groups.set(Group.objects.all()) + workspace.sync(env, "register_environment.handle", False) + + conn_type = ConnectionType.objects.filter(name="Snowflake").first() + print("Conn type created:", conn_type) + + template = Template.objects.create( + name="Generate test username", + slug="generate_test_username", + description="Username literal", + content="svc_datacoves_platform_ci", + context_type=Template.CONTEXT_TYPE_NONE, + format=Template.FORMAT_NONE, + enabled_for=["ConnectionTemplate"], + ) + print("Template created:", template) + + conn_template = ConnectionTemplate.objects.create( + name="main", + connection_user=ConnectionTemplate.CONNECTION_USER_FROM_TEMPLATE, + project=Project.objects.first(), + type=conn_type, + connection_user_template=template, + connection_details={ + "account": "toa80779", + "warehouse": "wh_integration", + "database": "balboa_dev", + "role": "bot_integration", + "mfa_protected": False, + }, + ) + print("Conn template created:", conn_template) + + sql_hook = SQLHook.objects.get(slug="set_snowflake_user_public_key") + sql_hook.connection_overrides = { + "role": "SECURITYADMIN", + "user": utils_test.secrets["snowflake_service_account"][ + "template_db_user" + ], + "password": utils_test.secrets["snowflake_service_account"][ + "template_db_password" + ], + } + sql_hook.enabled = True + sql_hook.save() + print("SQLHook updated:", sql_hook) + + return cluster + + cluster = await create_cluster() + return cluster + + async def cluster_setup_wizard(self): + @database_sync_to_async + def create_cluster(): + load_releases() + + connections_types = [ + ("Snowflake", "snowflake"), + ("Redshift", "redshift"), + ("Bigquery", "bigquery"), + ("Databricks", "databricks"), + ] + for name, slug in connections_types: + conn_type = ConnectionType.objects.create(name=name, slug=slug) + print("Created connecion type:", conn_type) + + profile = Profile.objects.create(name="dbt", slug="default") + print("Created profile:", profile) + + plan = Plan.objects.create( + name="Starter - Monthly", + slug="starter-monthly", + billing_period="monthly", + trial_period_days=14, + kind="starter", + ) + print("Created plan:", plan) + + release = Release.objects.order_by("-released_at").first() + cluster = Cluster.objects.create(domain=self._domain, release=release) + cluster.features_enabled["accounts_signup"] = True + cluster.limits = { + "max_cluster_active_accounts": 20, + "max_cluster_active_trial_accounts": 10, + } + cluster.code_server_config = { + "overprovisioning": {"enabled": False, "replicas": "1"}, + "max_code_server_pods_per_node": 16, + "resources": { + "requests": {"memory": "250Mi", "cpu": "100m"}, + "limits": {"memory": "2Gi", "cpu": "1"}, + }, + } + cluster.save() + print("Created cluster:", cluster) + return cluster + + cluster = await create_cluster() + return cluster diff --git a/src/core/api/app/integration_tests/conftest.py b/src/core/api/app/integration_tests/conftest.py new file mode 100644 index 00000000..901c3f12 --- /dev/null +++ b/src/core/api/app/integration_tests/conftest.py @@ -0,0 +1,4 @@ +import os + +os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true" +os.environ["DJANGO_LIVE_TEST_SERVER_ADDRESS"] = "0:8000" diff --git a/src/core/api/app/integration_tests/setup_wizard/base.py b/src/core/api/app/integration_tests/setup_wizard/base.py new file mode 100644 index 00000000..a76fd562 --- /dev/null +++ b/src/core/api/app/integration_tests/setup_wizard/base.py @@ -0,0 +1,159 @@ 
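+# Shared Playwright scaffolding for the setup-wizard integration tests: the methods
+# below drive the account, project/services, git SSH key and environment steps plus the
+# final Launch Pad check; the warehouse step is supplied by each warehouse-specific test.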
+import asyncio + +from integration_tests.base_test import PlaywrightChannelsTestCase, utils_test +from playwright.async_api import expect + + +class SetupWizardBase(PlaywrightChannelsTestCase): + async def lauchpad_create_new_account(self): + """Select to create a new account""" + + await self.page.goto(f"http://{self.domain}/launchpad") + await self.page.locator(".chakra-container").wait_for(state="attached") + await self.screenshot(full_page=True) + await expect(self.page).to_have_title("Datacoves") + await expect( + self.page.get_by_role("heading", name="Welcome to Datacoves!") + ).to_be_visible() + + await self.page.get_by_role("button", name="Continue account setup").evaluate( + "el => el.click()" + ) + await expect( + self.page.get_by_role("heading", name="Account Setup") + ).to_be_visible() + await self.screenshot(full_page=True) + + async def setup_wizard_step_one(self, data: dict): + """Complete the step one""" + + await asyncio.sleep(2) # Wait until the accordion is fully open. + account_name_control = self.page.get_by_label("Account name") + await account_name_control.is_visible() + await account_name_control.fill(data["account_name"]) + btn_next = self.page.get_by_role("button", name="Next") + await btn_next.wait_for(state="attached") + await self.screenshot(full_page=True) + await btn_next.evaluate("el => el.click()") + print("Step one completed") + + async def setup_wizard_step_two(self, data: dict): + """Complete the step two only with default option""" + await asyncio.sleep(2) # Wait until the accordion is fully open. + project_control = self.page.get_by_label("First Project") + await project_control.is_visible() + await project_control.fill(data["first_project"]) + await asyncio.sleep(2) + extract_and_load_data_control = self.page.locator( + "span", has_text="Extract and Load data" + ) + await extract_and_load_data_control.wait_for(state="visible") + await extract_and_load_data_control.wait_for(state="attached") + await extract_and_load_data_control.evaluate("el => el.click()") + await self.page.locator("span", has_text="Orchestrate").evaluate( + "el => el.click()" + ) + await self.page.locator("span", has_text="Analyze").evaluate("el => el.click()") + btn_next = self.page.get_by_role("button", name="Next") + await btn_next.wait_for(state="attached") + await self.screenshot(full_page=True) + await btn_next.evaluate("el => el.click()") + print("Step two completed") + + async def setup_wizard_step_four(self, data: dict): + """Complete step four to Github reposotory upload ssh keys""" + + await asyncio.sleep(2) # Wait until the accordion is fully open. 
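+        # Fill in the repo URL and branch, select the ed25519 key type, expect the first
+        # connection test to fail (the keys are not on GitHub yet), then upload the
+        # development and deploy keys (github_ssh_key_create) and re-test for success.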
+ git_repo_control = self.page.get_by_label("Git Repo SSH URL") + await git_repo_control.is_visible() + await git_repo_control.fill(data["git_repo_ssh_url"]) + await self.page.get_by_label("Release Branch").fill(data["release_branch"]) + + # Select Key type + await self.page.get_by_role("combobox", name="SSH key type").select_option( + "ed25519" + ) + await self.screenshot(full_page=True) + + # SSH develop + develop_ssh_input = self.page.get_by_label("SSH Development Key") + develop_ssh = await develop_ssh_input.input_value() + await expect(develop_ssh_input).to_contain_text("ssh") + + # SSH deploy + deploy_ssh_input = self.page.get_by_label("SSH Deploy Key") + deploy_ssh = await deploy_ssh_input.input_value() + await expect(deploy_ssh_input).to_contain_text("ssh") + await self.screenshot(full_page=True) + + btn_next = self.page.get_by_role("button", name="Next") + await btn_next.wait_for(state="attached") + await btn_next.evaluate("el => el.click()") + + await self.page.locator("p", has_text="Testing connection...").wait_for( + state="attached" + ) + await self.screenshot(full_page=True) + + # Test message error ssh keys not configurated + msg_error = self.page.locator( + "div", has_text="Error accessing Git Repository" + ).first + await msg_error.wait_for(state="attached") + await expect(msg_error).to_contain_text("Error accessing Git Repository") + await self.screenshot(full_page=True) + + await utils_test.github_ssh_key_create(self.ssh_key_title, develop_ssh) + await utils_test.github_ssh_key_create(self.ssh_key_title, deploy_ssh) + + btn_next = self.page.get_by_role("button", name="Next") + await btn_next.wait_for(state="attached") + await btn_next.evaluate("el => el.click()") + + await self.page.locator("p", has_text="Testing connection...").wait_for( + state="attached" + ) + await self.screenshot(full_page=True) + + # Test message success configurated + await self.page.get_by_text( + "Connection to the Git Repository was successful." + ).wait_for(state="attached", timeout=self.get_timeout(minutes=2)) + await self.screenshot(full_page=True) + print("Step four completed") + + async def setup_wizard_step_five(self): + """Complete step five about environment configuration""" + + await asyncio.sleep(2) # Wait until the accordion is fully open. + dbt_home_path_control = self.page.get_by_role("combobox", name="dbt home path") + await dbt_home_path_control.is_visible() + await dbt_home_path_control.select_option("transform") + await self.page.locator('input[id="dbt_profile"][name="dbt_profile"]').fill( + "default" + ) + await self.page.get_by_role("button", name="Finish").evaluate( + "el => el.click()" + ) + await self.screenshot(full_page=True) + print("Step five completed") + + async def setup_wizard_lauchpad(self, data: dict): + """Test Launchpad""" + + launchpad = self.page.get_by_role("heading", name="Launch Pad") + await launchpad.wait_for(timeout=self.get_timeout(minutes=1)) + await self.screenshot(full_page=True) + + """ + Expect: + 1. The name project + 2. 
Environment Development + """ + await expect(launchpad).to_be_visible() + await expect( + self.page.get_by_role("heading", name=data["first_project"]) + ).to_be_visible() + await expect( + self.page.get_by_role("heading", name="Development") + ).to_be_visible() diff --git a/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_bigquery.py b/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_bigquery.py new file mode 100644 index 00000000..51e11257 --- /dev/null +++ b/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_bigquery.py @@ -0,0 +1,81 @@ +import asyncio +import base64 + +import pytest +from integration_tests.base_test import utils_test +from integration_tests.setup_wizard.base import SetupWizardBase + +data = { + "account_name": "Acme Inc.", + "first_project": "Commercial Analytics", + "conn_type": "bigquery", + "dataset": "default", + "key_file": base64.b64decode( + utils_test.secrets["bigquery_service_account"]["Key_file_base64"] + ).decode("utf-8"), + "git_repo_ssh_url": "git@github.com:datacoves/balboa.git", + "release_branch": "main", +} + + +class SetupWizardWithBigqueryTest(SetupWizardBase): + def setUp(self): + self._subpath = f"setup_wizard/{__name__}" + return super().setUp() + + @pytest.mark.asyncio + async def test_setup_wizard_with_bigquery(self): + """Test to setup an account with Bigquery""" + + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + cluster = await self.cluster_setup_wizard() + + # Login + await utils_test.login( + page=self.page, + username=user.email, + password=user_session["password"], + domain=cluster.domain, + ) + + await self.lauchpad_create_new_account() + await self.setup_wizard_step_one(data=data) + await self.setup_wizard_step_two(data=data) + await self.setup_wizard_step_three() + await self.setup_wizard_step_four(data=data) + await self.setup_wizard_step_five() + await self.setup_wizard_lauchpad(data=data) + print("Setup wizard completed with Bigquery completed") + + except Exception: + await self.dump_pod_logs() + raise + + finally: + await self.asyncTearDown() + + async def setup_wizard_step_three(self): + """Complete the step three with Bigquery warehouse""" + + await asyncio.sleep(2) # Wait until the accordion is fully open. 
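+        # Warehouse step for BigQuery: select the connection type, fill dataset and
+        # keyfile, then wait for "Connection to the Data Warehouse was successful".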
+ await self.page.locator("#type").select_option(data["conn_type"]) + await self.page.get_by_label("Dataset").fill(data["dataset"]) + await self.page.get_by_label("Keyfile content").fill(data["key_file"]) + await self.screenshot(full_page=True) + + btn_next = self.page.get_by_role("button", name="Next") + await btn_next.wait_for(state="attached") + await btn_next.evaluate("el => el.click()") + + await self.page.locator("p", has_text="Testing connection...").wait_for( + state="attached" + ) + await self.screenshot(full_page=True) + await self.page.get_by_text( + "Connection to the Data Warehouse was successful" + ).wait_for(state="attached") + print("Step three completed") diff --git a/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_databricks.py b/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_databricks.py new file mode 100644 index 00000000..85f60b3d --- /dev/null +++ b/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_databricks.py @@ -0,0 +1,82 @@ +import asyncio + +import pytest +from integration_tests.base_test import utils_test +from integration_tests.setup_wizard.base import SetupWizardBase + +data = { + "account_name": "Acme Inc.", + "first_project": "Commercial Analytics", + "conn_type": "databricks", + "host": "adb-1158734404438996.16.azuredatabricks.net", + "http_path": "/sql/1.0/warehouses/c66bb3ad1820877e", + "schema": "default", + "token": utils_test.secrets["databricks_service_account"]["token"], + "git_repo_ssh_url": "git@github.com:datacoves/balboa.git", + "release_branch": "main", +} + + +class SetupWizardWithDatabricksTest(SetupWizardBase): + def setUp(self): + self._subpath = f"setup_wizard/{__name__}" + return super().setUp() + + @pytest.mark.asyncio + async def test_setup_wizard_with_databricks(self): + """Test to setup an account with Databricks""" + + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + cluster = await self.cluster_setup_wizard() + + # Login + await utils_test.login( + page=self.page, + username=user.email, + password=user_session["password"], + domain=cluster.domain, + ) + + await self.lauchpad_create_new_account() + await self.setup_wizard_step_one(data=data) + await self.setup_wizard_step_two(data=data) + await self.setup_wizard_step_three() + await self.setup_wizard_step_four(data=data) + await self.setup_wizard_step_five() + await self.setup_wizard_lauchpad(data=data) + print("Setup wizard completed with Databricks completed") + + except Exception: + await self.dump_pod_logs() + raise + + finally: + await self.asyncTearDown() + + async def setup_wizard_step_three(self): + """Complete the step three with Redshift warehouse""" + + await asyncio.sleep(2) # Wait until the accordion is fully open. 
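+        # Databricks variant of the warehouse step: host, HTTP path, schema and token.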
+ await self.page.locator("#type").select_option(data["conn_type"]) + await self.page.get_by_label("Host").fill(data["host"]) + await self.page.get_by_label("HTTP Path").fill(data["http_path"]) + await self.page.get_by_label("Schema").fill(data["schema"]) + await self.page.get_by_label("Token").fill(data["token"]) + await self.screenshot(full_page=True) + + btn_next = self.page.get_by_role("button", name="Next") + await btn_next.wait_for(state="attached") + await btn_next.evaluate("el => el.click()") + + await self.page.locator("p", has_text="Testing connection...").wait_for( + state="attached" + ) + await self.screenshot() + await self.page.get_by_text( + "Connection to the Data Warehouse was successful" + ).wait_for(state="attached") + print("Step three completed") diff --git a/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_redshift.py b/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_redshift.py new file mode 100644 index 00000000..de809643 --- /dev/null +++ b/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_redshift.py @@ -0,0 +1,84 @@ +import asyncio + +import pytest +from integration_tests.base_test import utils_test +from integration_tests.setup_wizard.base import SetupWizardBase + +data = { + "account_name": "Acme Inc.", + "first_project": "Commercial Analytics", + "conn_type": "redshift", + "host": "default.468004607141.us-west-2.redshift-serverless.amazonaws.com", + "database": "dev", + "schema": "sebastian", + "user": utils_test.secrets["redshift_service_account"]["user"], + "password": utils_test.secrets["redshift_service_account"]["password"], + "git_repo_ssh_url": "git@github.com:datacoves/balboa.git", + "release_branch": "main", +} + + +class SetupWizardWithRedshiftTest(SetupWizardBase): + def setUp(self): + self._subpath = f"setup_wizard/{__name__}" + return super().setUp() + + @pytest.mark.asyncio + async def test_setup_wizard_with_redshift(self): + """Test to setup an account with Redshift""" + + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + cluster = await self.cluster_setup_wizard() + + # Login + await utils_test.login( + page=self.page, + username=user.email, + password=user_session["password"], + domain=cluster.domain, + ) + + await self.lauchpad_create_new_account() + await self.setup_wizard_step_one(data=data) + await self.setup_wizard_step_two(data=data) + await self.setup_wizard_step_three() + await self.setup_wizard_step_four(data=data) + await self.setup_wizard_step_five() + await self.setup_wizard_lauchpad(data=data) + print("Setup wizard completed with Redshift completed") + + except Exception: + await self.dump_pod_logs() + raise + + finally: + await self.asyncTearDown() + + async def setup_wizard_step_three(self): + """Complete the step three with Redshift warehouse""" + + await asyncio.sleep(2) # Wait until the accordion is fully open. 
+ await self.page.locator("#type").select_option(data["conn_type"]) + await self.page.get_by_label("Host").fill(data["host"]) + await self.page.get_by_label("Database").fill(data["database"]) + await self.page.get_by_label("Schema").fill(data["schema"]) + await self.page.locator("#user").fill(data["user"]) + await self.page.get_by_label("Password").fill(data["password"]) + await self.screenshot(full_page=True) + + btn_next = self.page.get_by_role("button", name="Next") + await btn_next.wait_for(state="attached") + await btn_next.evaluate("el => el.click()") + + await self.page.locator("p", has_text="Testing connection...").wait_for( + state="attached" + ) + await self.page.screenshot(full_page=True) + await self.page.get_by_text( + "Connection to the Data Warehouse was successful" + ).wait_for(state="attached") + print("Step three completed") diff --git a/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_snowflake.py b/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_snowflake.py new file mode 100644 index 00000000..cf2dab7c --- /dev/null +++ b/src/core/api/app/integration_tests/setup_wizard/test_setup_wizard_with_snowflake.py @@ -0,0 +1,90 @@ +import asyncio + +import pytest +from integration_tests.base_test import utils_test +from integration_tests.setup_wizard.base import SetupWizardBase + +data = { + "account_name": "Acme Inc.", + "first_project": "Commercial Analytics", + "conn_type": "snowflake", + "warehouse_account": "toa80779", + "warehouse": "wh_integration", + "database": "balboa_dev", + "role": "bot_integration", + "schema": "sebastian", + "user": utils_test.secrets["snowflake_service_account"]["user"], + "password": utils_test.secrets["snowflake_service_account"]["password"], + "git_repo_ssh_url": "git@github.com:datacoves/balboa.git", + "release_branch": "main", +} + + +class SetupWizardWithSnowflakeTest(SetupWizardBase): + def setUp(self): + self._subpath = f"setup_wizard/{__name__}" + return super().setUp() + + @pytest.mark.asyncio + async def test_setup_wizard_with_snowflake(self): + """Test to setup an account with Snowflake""" + + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + cluster = await self.cluster_setup_wizard() + + # Login + await utils_test.login( + page=self.page, + username=user.email, + password=user_session["password"], + domain=cluster.domain, + ) + + await self.lauchpad_create_new_account() + await self.setup_wizard_step_one(data=data) + await self.setup_wizard_step_two(data=data) + await self.setup_wizard_step_three() + await self.setup_wizard_step_four(data=data) + await self.setup_wizard_step_five() + await self.setup_wizard_lauchpad(data=data) + print("Setup wizard completed with Snowflake completed") + + except Exception: + await self.dump_pod_logs() + raise + + finally: + await self.asyncTearDown() + + async def setup_wizard_step_three(self): + """Complete the step three with Snowflake warehouse""" + + await asyncio.sleep(2) # Wait until the accordion is fully open. 
+ await self.page.locator("#type").select_option(data["conn_type"]) + await self.page.get_by_label("Snowflake Account").fill( + data["warehouse_account"] + ) + await self.page.get_by_label("Warehouse").fill(data["warehouse"]) + await self.page.get_by_label("Database").fill(data["database"]) + await self.page.get_by_label("Role").fill(data["role"]) + await self.page.get_by_label("Schema").fill(data["schema"]) + await self.page.locator("#user").fill(data["user"]) + await self.page.get_by_label("Password").fill(data["password"]) + await self.screenshot(full_page=True) + + btn_next = self.page.get_by_role("button", name="Next") + await btn_next.wait_for(state="attached") + await btn_next.evaluate("el => el.click()") + + await self.page.locator("p", has_text="Testing connection...").wait_for( + state="attached" + ) + await self.screenshot(full_page=True) + await self.page.get_by_text( + "Connection to the Data Warehouse was successful" + ).wait_for(state="attached") + print("Step three completed") diff --git a/src/core/api/app/integration_tests/user_settings/base.py b/src/core/api/app/integration_tests/user_settings/base.py new file mode 100644 index 00000000..377681ca --- /dev/null +++ b/src/core/api/app/integration_tests/user_settings/base.py @@ -0,0 +1,27 @@ +import re + +from integration_tests.utils_test import ONE_MINUTE_IN_MS, github_ssh_key_create +from playwright.async_api import Page, expect + + +async def git_ssh_key_test_connection( + page: Page, git_repo_ssh_url: str, is_success: bool, ssh_key_title: str +) -> int: + ssh_input = page.get_by_text(re.compile("^ssh-ed25519", re.IGNORECASE)) + ssh = await ssh_input.input_value() + await expect(ssh_input).to_contain_text("ssh-ed25519") + + await github_ssh_key_create(title=ssh_key_title, ssh_key=ssh) + + await page.get_by_role("row", name=f"{git_repo_ssh_url}").get_by_role( + "button" + ).evaluate("el => el.click()") + + if is_success: + await expect(page.get_by_role("button", name="Re-test")).to_be_visible( + timeout=ONE_MINUTE_IN_MS + ) + else: + await expect(page.get_by_text("Error accessing Git Repository")).to_be_visible( + timeout=ONE_MINUTE_IN_MS + ) diff --git a/src/core/api/app/integration_tests/user_settings/test_user_settings_db_conn_with_snowflake.py b/src/core/api/app/integration_tests/user_settings/test_user_settings_db_conn_with_snowflake.py new file mode 100644 index 00000000..3bc7dd55 --- /dev/null +++ b/src/core/api/app/integration_tests/user_settings/test_user_settings_db_conn_with_snowflake.py @@ -0,0 +1,322 @@ +import asyncio + +import pytest +from integration_tests.base_test import PlaywrightChannelsTestCase, utils_test +from integration_tests.user_settings.base import git_ssh_key_test_connection +from playwright.async_api import expect + +data = { + "git_repo_ssh_url": "git@github.com:datacoves/balboa.git", + "db_name": "test_db_name", + "db_conn_template": "1", + "db_auth_type": "key", + "db_user": utils_test.secrets["snowflake_service_account"]["user"], + "db_password": utils_test.secrets["snowflake_service_account"]["password"], +} + + +class UserSettingsDbConnectionWithSnowflakeTest(PlaywrightChannelsTestCase): + def setUp(self): + self._subpath = f"user_settings/{__name__}" + return super().setUp() + + @pytest.mark.asyncio + async def test_user_settings_db_connection_with_snowflake(self): + """ + Test case user settings to create connection with Snowflake + + Steps: + 1. Generate SSH key to clone git repository. + 2. Auto-generate SSH key to database connection. + 3. 
Provide wrong SSH key to database connection. + 4. Testing database connection with credential username and password. + 5. Provide success SSH key to database connection. + 6. Testing test connection and edit buttons works. + 7. Go to workbench (transform tab) and check that git repo was cloned successfully + 8. Run dbt debug on terminal to validate db connection worked ok + """ + + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + cluster = await self.cluster_setup(user=user) + + # Login + await utils_test.login( + page=self.page, + username=user.email, + password=user_session["password"], + domain=cluster.domain, + ) + + # Go to Launchpad + await self.page.goto("https://datacoveslocal.com/launchpad") + header_launchpad = self.page.get_by_role("heading", name="Launch Pad") + await header_launchpad.wait_for(timeout=self.get_timeout(minutes=1)) + btn_open_env = self.page.get_by_role("button", name="Open", exact=True) + await btn_open_env.wait_for( + state="attached", timeout=self.get_timeout(minutes=5) + ) + await self.screenshot() + + # Go to Profile Settings + btn_open_user_menu = self.page.get_by_role("button", name="Open user menu") + await btn_open_user_menu.wait_for(state="attached") + await btn_open_user_menu.click() + await self.page.get_by_role("menuitem", name="Settings").click() + await self.page.get_by_role("heading", name="Profile Settings").wait_for() + await self.screenshot() + + # Testing + await self.git_ssh_key_gen() + await self.auto_generate_ssh_key() + await self.delete_connection() + await self.provide_error_ssh_key() + await self.connection_with_data_success() + await self.delete_connection(delete_ssh_key=False) + await self.provide_success_ssh_key() + await self.btn_connection_test() + await self.btn_edit_test() + await self.go_to_transform_tab() + + print("Testing user settings database connection completed") + + except Exception: + await self.dump_pod_logs([("code-server", "code-server"), ("pomerium",)]) + raise + + finally: + await self.asyncTearDown() + + async def git_ssh_key_gen(self): + """Test generate ssh key to clone git repository""" + await asyncio.sleep(1) + tab_git_ssh_keys = self.page.get_by_role("tab", name="Git SSH Keys") + await tab_git_ssh_keys.wait_for(state="visible") + await tab_git_ssh_keys.wait_for(state="attached") + await tab_git_ssh_keys.evaluate("el => el.click()") + await self.page.locator("section").filter( + has_text="Git SSH keysAdd this SSH key to your git server account to clone your repos." 
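+            # The locator text is the section heading ("Git SSH keys") run together with
+            # its description, which is presumably why there is no space between them.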
+ ).get_by_role("button", name="Add").click() + await self.page.get_by_role("menuitem", name="Auto-generate key pairs").click() + await self.clean_toast() + await self.page.get_by_role("button", name="COPY").click() + await expect( + self.page.get_by_text("SSH key copied to clipboard") + ).to_be_visible() + await self.clean_toast() + await git_ssh_key_test_connection( + page=self.page, + git_repo_ssh_url=data["git_repo_ssh_url"], + is_success=True, + ssh_key_title=self.ssh_key_title, + ) + await self.screenshot() + await self.clean_toast() + + async def auto_generate_ssh_key(self): + """Autogenerate SSH key to testing connection""" + tab_database_auth_keys = self.page.get_by_role("tab", name="Database Auth Keys") + await tab_database_auth_keys.wait_for(state="visible") + await tab_database_auth_keys.wait_for(state="attached") + await tab_database_auth_keys.evaluate("el => el.click()") + await self.page.locator("section").filter( + has_text="Database authorization keysAdd this authorization key" + ).get_by_role("button", name="Add").click() + await self.page.get_by_role("menuitem", name="Auto-generate key pairs").click() + await self.page.get_by_text("Key pairs successfully created").wait_for( + state="attached" + ) + await self.clean_toast() + await self.screenshot(full_page=True) + await self.connection_with_ssh_key_success() + await self.screenshot(full_page=True) + + async def connection_with_ssh_key_success(self): + """Makes the testing connection with the SSH key and validate the success message""" + await self.page.get_by_role("tab", name="Database Connections").click() + await self.page.locator("section").filter( + has_text="Database connectionsConfigure Database connections for each project environment." + ).get_by_role("button", name="Add").click() + conn_section = self.page.locator("section").filter( + has_text="New connection for" + ) + await conn_section.locator("input").fill(data["db_name"]) + await conn_section.get_by_role( + "combobox", name="Connection Template" + ).select_option(index=1) + await conn_section.get_by_role("combobox", name="Auth type").select_option( + data["db_auth_type"] + ) + await self.screenshot() + await conn_section.get_by_role("combobox", name="Public key").select_option( + index=1 + ) + await self.screenshot() + await self.page.get_by_role("button", name="Save").click() + await self.screenshot() + await self.page.get_by_text( + "Connection to the Data Warehouse was successful" + ).wait_for(state="attached") + await self.screenshot() + await expect( + self.page.get_by_role("row", name=f"{data['db_name']} snowflake") + ).to_be_visible() + await self.screenshot() + + async def delete_connection(self, delete_ssh_key=True): + """Delete database connection""" + database_conn = self.page.get_by_role("tab", name="Database Connections") + await database_conn.wait_for(state="visible") + await database_conn.wait_for(state="attached") + await database_conn.evaluate("el => el.click()") + tested_msg = self.page.get_by_role("row", name=f"{data['db_name']} snowflake") + await tested_msg.wait_for(state="attached") + await tested_msg.get_by_role("button").nth(2).click() + delete_btn = self.page.get_by_role("button", name="Delete") + await delete_btn.wait_for(state="attached") + await delete_btn.click() + + if delete_ssh_key: + # Delete private key + await self.page.get_by_role("tab", name="Database Auth Keys").click() + await self.page.locator("section").filter( + has_text="Database authorization keysAdd this authorization key to your database account" + 
).get_by_role("button", name="Delete key").click() + delete_btn = self.page.get_by_role("button", name="Delete") + await delete_btn.wait_for(state="attached") + await delete_btn.click() + + async def provide_error_ssh_key(self): + """Testing connection with SSH key wrong and validate the error message""" + await self.page.get_by_role("tab", name="Database Auth Keys").click() + await self.page.locator("section").filter( + has_text="Database authorization keysAdd this authorization key" + ).get_by_role("button", name="Add").click() + await self.page.get_by_role("menuitem", name="Provide private key").click() + await self.page.get_by_label("Private key*").fill("ssh key dummy") + await self.page.get_by_role("button", name="Save").click() + await self.page.get_by_text("Error Creating key pairs").wait_for( + state="attached" + ) + await self.screenshot(full_page=True) + await self.page.get_by_role("button", name="Cancel").click() + + async def connection_with_data_success(self): + """Testing connection with success data and validate the success message""" + await self.page.get_by_role("tab", name="Database Connections").click() + await self.page.locator("section").filter( + has_text="Database connectionsConfigure Database connections for each project environment." + ).get_by_role("button", name="Add").click() + conn_section = self.page.locator("section").filter( + has_text="New connection for" + ) + await conn_section.locator("input").fill(data["db_name"]) + await self.page.get_by_role( + "combobox", name="Connection Template" + ).select_option(index=1) + # page.get_by_role("combobox", name="Auth type").select_option(index=0) # Password + await self.page.get_by_role("combobox", name="Auth type").press("Tab") + await self.page.locator("#password").fill(data["db_password"]) + await self.page.get_by_role("button", name="Save").click() + await self.page.get_by_text("Testing connection...").wait_for( + state="attached", + ) + await self.screenshot(full_page=True) + await self.page.get_by_text( + "Connection to the Data Warehouse was successful" + ).wait_for(state="attached") + await expect( + self.page.get_by_role("row", name=f"{data['db_name']} snowflake") + ).to_be_visible() + await self.screenshot(full_page=True) + + async def provide_success_ssh_key(self): + """Provide SSH key to testing connection""" + await self.page.get_by_role("tab", name="Database Auth Keys").click() + await self.page.locator("section").filter( + has_text="Database authorization keysAdd this authorization key" + ).get_by_role("button", name="Add").click() + await self.page.get_by_role("menuitem", name="Provide private key").click() + private_key = await utils_test.gen_private_key() + await self.page.get_by_label("Private key*").fill(private_key) + await self.page.get_by_role("button", name="Save").click() + await self.page.get_by_text("Key pairs successfully created").wait_for( + state="attached" + ) + await self.clean_toast() + await self.screenshot(full_page=True) + await self.connection_with_ssh_key_success() + await self.screenshot(full_page=True) + + async def btn_connection_test(self): + """Testing that test connection button test works""" + await self.page.get_by_role("tab", name="Database Connections").click() + await self.page.get_by_role( + "row", name=f"{data['db_name']} snowflake" + ).get_by_role("button").first.click() + await expect( + self.page.get_by_text("Connection to the Data Warehouse was successful") + ).to_be_visible(timeout=self.get_timeout(minutes=1)) + await self.clean_toast() + + async def 
btn_edit_test(self): + """Testing that edit connection button works""" + await self.page.get_by_role("tab", name="Database Connections").click() + await self.page.get_by_role( + "row", name=f"{data['db_name']} snowflake" + ).get_by_role("button").nth(1).click() + await expect( + self.page.get_by_text("Edit connection for Analytics Development") + ).to_be_visible() + await self.page.get_by_role("button", name="Cancel").click() + + async def go_to_transform_tab(self): + await asyncio.sleep(300) + await self.page.goto("https://datacoveslocal.com/launchpad") + btn_open_env = self.page.get_by_role("button", name="Open", exact=True) + await btn_open_env.wait_for( + state="attached", timeout=self.get_timeout(minutes=5) + ) + await self.screenshot() + await btn_open_env.click() + + await self.page.frame_locator('iframe[name="docs"]').get_by_role( + "heading", name="Welcome to the Datacoves Documentation" + ).wait_for(state="attached", timeout=self.get_timeout(minutes=5)) + + await self.screenshot() + + # Open VS Code + await self.page.get_by_text("Transform").click() + transform_iframe = self.page.frame_locator('iframe[name="transform"]') + await transform_iframe.get_by_role("treeitem", name=".gitignore").wait_for( + state="attached", timeout=self.get_timeout(minutes=7) + ) + side_bar_explorer = transform_iframe.get_by_role( + "heading", name="Explorer", exact=True + ) + await side_bar_explorer.wait_for(state="attached") + await side_bar_explorer.click() + await self.screenshot() + + terminal = transform_iframe.locator(".terminal-widget-container") + await terminal.wait_for(state="attached", timeout=self.get_timeout(minutes=1)) + await terminal.click() + + terminal_input = transform_iframe.get_by_role("textbox", name="Terminal") + await terminal_input.wait_for( + state="attached", timeout=self.get_timeout(minutes=1) + ) + await terminal_input.fill("dbt debug") + await terminal_input.press("Enter") + + await asyncio.sleep(60) + await self.screenshot() + + output_cmd = await transform_iframe.locator("div.xterm-rows").inner_text() + print("dbt debug:", output_cmd) + # FIXME DCV-2022: dbt-debug's 'all checks passed' output differs between local and github runs + assert ("All checks passed" in output_cmd) or ("Allcheckspassed" in output_cmd) diff --git a/src/core/api/app/integration_tests/user_settings/test_user_settings_git_conn.py b/src/core/api/app/integration_tests/user_settings/test_user_settings_git_conn.py new file mode 100644 index 00000000..01fac71b --- /dev/null +++ b/src/core/api/app/integration_tests/user_settings/test_user_settings_git_conn.py @@ -0,0 +1,186 @@ +import pytest +from integration_tests.base_test import PlaywrightChannelsTestCase, utils_test +from integration_tests.user_settings.base import git_ssh_key_test_connection +from playwright.async_api import expect + +data = {"git_repo_ssh_url": "git@github.com:datacoves/balboa.git"} + + +class UserSettingsGitConnectionTest(PlaywrightChannelsTestCase): + def setUp(self): + self._subpath = f"user_settings/{__name__}" + return super().setUp() + + @pytest.mark.asyncio + async def test_user_settings_git_connection(self): + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + cluster = await self.cluster_setup(user=user) + + # Login + await utils_test.login( + page=self.page, + username=user.email, + password=user_session["password"], + domain=cluster.domain, + ) + + # Go to Launchpad + await self.page.goto("https://datacoveslocal.com/launchpad") + header_launchpad = 
self.page.get_by_role("heading", name="Launch Pad") + await header_launchpad.wait_for(timeout=self.get_timeout(minutes=1)) + btn_open_env = self.page.get_by_role("button", name="Open", exact=True) + await btn_open_env.wait_for( + state="attached", timeout=self.get_timeout(minutes=5) + ) + await self.screenshot() + + # Go to Profile Settings + btn_open_user_menu = self.page.get_by_role("button", name="Open user menu") + await btn_open_user_menu.wait_for( + state="attached", timeout=self.get_timeout(minutes=5) + ) + await btn_open_user_menu.click() + menu_user_settings = self.page.get_by_role("menuitem", name="Settings") + await menu_user_settings.is_visible() + await menu_user_settings.click() + header_user_settings = self.page.get_by_role( + "heading", name="Profile Settings" + ) + await header_user_settings.wait_for() + await self.screenshot() + + # Tests with user SSH Keys + await self.when_provide_git_ssh_key_success() + await self.when_provide_git_ssh_key_wrong() + await self.when_git_ssh_key_is_auto_generate() + print("Testing user settings git completed") + + except Exception: + await self.dump_pod_logs([("code-server", "code-server"), ("pomerium",)]) + raise + + finally: + await self.asyncTearDown() + + async def when_provide_git_ssh_key_success(self): + """Testing git connection with SSH key provide and success message""" + await self.page.get_by_role("tab", name="Git SSH Keys").click() + btn_add = ( + self.page.locator("section") + .filter( + has_text="Git SSH keysAdd this SSH key to your git server account to clone your repos." + ) + .get_by_role("button", name="Add") + ) + await btn_add.click() + await self.page.get_by_role("menuitem", name="Provide private key").click() + ssh_key = await utils_test.gen_open_ssh_key() + await self.page.get_by_label("Private key*").fill(ssh_key) + await self.screenshot() + await self.page.get_by_role("button", name="Save").click() + await self.page.get_by_text("SSH key pairs successfully created").wait_for( + state="attached" + ) + await self.clean_toast() + await git_ssh_key_test_connection( + page=self.page, + git_repo_ssh_url=data["git_repo_ssh_url"], + is_success=True, + ssh_key_title=self.ssh_key_title, + ) + await self.screenshot() + await self.clean_toast() + + # Delete ssh key + await self.page.get_by_role("button", name="Delete key").click() + delete_btn = self.page.get_by_role("button", name="Delete") + await delete_btn.wait_for(state="attached") + await delete_btn.click() + await self.page.get_by_text("SSH Key successfully deleted.").wait_for( + state="attached" + ) + await self.screenshot() + await self.clean_toast() + + async def when_provide_git_ssh_key_wrong(self): + """Testing git connection with SSH key provide with extra lines and malformat""" + + await self.page.get_by_role("tab", name="Git SSH Keys").click() + btn_add = ( + self.page.locator("section") + .filter( + has_text="Git SSH keysAdd this SSH key to your git server account to clone your repos." 
+ ) + .get_by_role("button", name="Add") + ) + btn_provide_ssh = self.page.get_by_role("menuitem", name="Provide private key") + private_key_input = self.page.get_by_label("Private key*") + + # SSH Key provided has new lines + await btn_add.click() + await btn_provide_ssh.click() + ssh_key = await utils_test.gen_open_ssh_key() + await private_key_input.fill(f"\n\n{ssh_key}\n") + await self.screenshot() + await self.page.get_by_role("button", name="Save").click() + await self.page.get_by_text("SSH key pairs successfully created").wait_for( + state="attached" + ) + await self.screenshot() + await self.clean_toast() + + await git_ssh_key_test_connection( + page=self.page, + git_repo_ssh_url=data["git_repo_ssh_url"], + is_success=False, + ssh_key_title=self.ssh_key_title, + ) + await self.screenshot() + await self.clean_toast() + + await self.page.get_by_role("button", name="Delete key").click() + delete_btn = self.page.get_by_role("button", name="Delete") + await delete_btn.wait_for(state="attached") + await delete_btn.click() + await self.page.get_by_text("SSH Key successfully deleted.").wait_for( + state="attached" + ) + await self.screenshot() + + # SSH Key provided is wrong + await btn_add.click() + await btn_provide_ssh.click() + await private_key_input.fill("ssh test dummy wrong") + await self.page.get_by_role("button", name="Save").click() + await self.page.get_by_text("Error Creating SSH key pairs").wait_for( + state="attached" + ) + await self.screenshot() + await self.page.get_by_role("button", name="Cancel").click() + await self.clean_toast() + + async def when_git_ssh_key_is_auto_generate(self): + """Testing git connection with SSH key auto-generated and success message""" + + await self.page.get_by_role("tab", name="Git SSH Keys").click() + await self.page.locator("section").filter( + has_text="Git SSH keysAdd this SSH key to your git server account to clone your repos." 
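+ # Scoping the "Add" lookup to this filtered section keeps the click from
+ # matching "Add" buttons belonging to other sections of the settings page.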
+ ).get_by_role("button", name="Add").click() + await self.page.get_by_role("menuitem", name="Auto-generate key pairs").click() + await self.clean_toast() + await self.page.get_by_role("button", name="COPY").click() + await expect( + self.page.get_by_text("SSH key copied to clipboard") + ).to_be_visible() + await self.clean_toast() + await git_ssh_key_test_connection( + page=self.page, + git_repo_ssh_url=data["git_repo_ssh_url"], + is_success=True, + ssh_key_title=self.ssh_key_title, + ) + await self.screenshot() diff --git a/src/core/api/app/integration_tests/utils_test.py b/src/core/api/app/integration_tests/utils_test.py new file mode 100644 index 00000000..8c59c677 --- /dev/null +++ b/src/core/api/app/integration_tests/utils_test.py @@ -0,0 +1,249 @@ +import json +import time +from http import HTTPStatus +from pathlib import Path + +import requests +import yaml +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import ed25519, rsa +from kubernetes.client.rest import ApiException as k8_api_exception +from playwright.async_api import Page +from rich.console import Console +from rich.table import Column, Table + +import lib.kubernetes.client as k8s_client + +ONE_MINUTE_IN_MS = 60 * 1000 +DEFAULT_TIMEOUT = ONE_MINUTE_IN_MS * 2 # 3 mins +ENVIRONMENT_NAME = "tst001" +NAMESPACE_NAME = f"dcw-{ENVIRONMENT_NAME}" + +"""Reads the secrets storage in secrets/integration_tests.yaml""" +with open("/tmp/integration_tests.yaml", "r") as file: + secrets = yaml.safe_load(file) + +github_base_url = "https://api.github.com" +github_api_version = "2022-11-28" +github_access_token = secrets["github_service_account"]["access_token"] +github_headers = { + "Accept": "application/vnd.github+json", + "Authorization": f"Bearer {github_access_token}", + "X-GitHub-Api-Version": github_api_version, +} + +kc = k8s_client.Kubectl() + + +async def login(page: Page, username: str, password: str, domain: str): + """Login to the Django admin form""" + + await page.goto(f"https://api.{domain}/panel/login/") + + title_expected = "Log in | Grappelli" + title = await page.title() + assert ( + title == title_expected + ), f"The title page '{title}', but we expected '{title_expected}'" + + await page.get_by_label("Email:").fill(username) + await page.get_by_label("Password:").fill(password) + await page.get_by_role("button", name="Log in").click() + title_expected = "Cluster administration | Grappelli" + title = await page.title() + assert ( + title == title_expected + ), f"The title page '{title}', but we expected '{title_expected}'" + + +async def gen_open_ssh_key() -> str: + private_key = ed25519.Ed25519PrivateKey.generate() + private_bytes = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.OpenSSH, + encryption_algorithm=serialization.NoEncryption(), + ) + + """ + public_key = private_key.public_key() + public_bytes = public_key.public_bytes( + encoding=serialization.Encoding.OpenSSH, + format=serialization.PublicFormat.OpenSSH + ) + """ + + return private_bytes.decode("utf-8") + + +async def gen_private_key(): + KEY_SIZE = 2048 + PUBLIC_EXP = 65537 + private_key = rsa.generate_private_key( + public_exponent=PUBLIC_EXP, key_size=KEY_SIZE, backend=default_backend() + ) + private_key_str = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), 
+ ).decode("utf-8") + + return private_key_str + + +async def github_ssh_key_create(title: str, ssh_key: str) -> int: + """Creates SSH key in Github""" + r = requests.post( + f"{github_base_url}/user/keys", + headers=github_headers, + data=json.dumps({"title": title, "key": ssh_key}), + ) + + if r.ok: + ssh_id = r.json()["id"] + print(f"Github SSH title={title} id={ssh_id} created: {ssh_key}") + return ssh_id + else: + print(r.text) + raise Exception(f"Could not create ssh {title}") + + +def github_ssh_key_delete(ssh_id: int): + """Delete SSH key in Github by id""" + r = requests.delete(f"{github_base_url}/user/keys/{ssh_id}", headers=github_headers) + if r.ok: + print(f"Github SSH id={ssh_id} deleted") + else: + print(r.text) + raise Exception(f"Could not delete ssh {ssh_id}") + + +def github_ssh_delete_all_by_title(title: str): + r = requests.get(f"{github_base_url}/user/keys", headers=github_headers) + if r.ok: + for ssh_key in r.json(): + ssh_id = ssh_key["id"] + ssh_title = ssh_key["title"] + if ssh_title.startswith(title): + github_ssh_key_delete(ssh_id=ssh_id) + else: + print(r.text) + + +def check_namespace_terminated(): + """ + Returns whether test namespace was terminated or not + """ + + def _namespace_terminated(): + try: + kc.read_namespace(NAMESPACE_NAME) + return False # Namespace still exists + except k8_api_exception as e: + if e.status == HTTPStatus.NOT_FOUND: + return True # Namespace has been terminated + raise + + t = 20 + while not _namespace_terminated() and t > 0: + t -= 1 + print(f"Namespace '{NAMESPACE_NAME}' is still active. (attempt {t})") + time.sleep(10) + + if t == 5: + # To force delete the namespace + kubectl = k8s_client.Kubectl() + payload = { + "metadata": {"name": NAMESPACE_NAME}, + "spec": {"finalizers": None}, + } + kubectl.CoreV1Api.replace_namespace_finalize( + name=NAMESPACE_NAME, body=payload + ) + + if t == 0: + raise Exception( + f"Namespace '{NAMESPACE_NAME}' couldn't be terminated. 
Check logs" + ) + + +async def dump_pod_status(test_subpath, namespace=NAMESPACE_NAME): + """ + Receives test's subpath, k8s namespace, pod name-like and optionally container + Returns the logs of the pod + """ + + dump_path = Path(f"integration_tests/output/{test_subpath}/logs/") + dump_path.mkdir(parents=True, exist_ok=True) + dump_pod_status_path = dump_path / "pods_status.txt" + dump_events_path = dump_path / "events.txt" + kubectl = k8s_client.Kubectl() + + try: + table = Table("pod", "namespace", "container", "state", title="Pods status") + namespace_pods = kubectl.CoreV1Api.list_namespaced_pod(namespace) + for pod in namespace_pods.items: + pod_name = f"{pod.metadata.name}" + if pod.status and pod.status.container_statuses: + for container_status in pod.status.container_statuses: + cointainer_name = container_status.name + state_obj = container_status.state + state = "unknown" + if pod.metadata.deletion_timestamp: + state = "terminating" + elif state_obj.running and container_status.ready: + state = "running" + elif state_obj.running and not container_status.ready: + state = "starting" + elif state_obj.waiting: + state = "waiting" + elif state_obj.terminated: + state = "terminated" + + table.add_row(pod_name, namespace, cointainer_name, state) + + with open(dump_pod_status_path, "w") as f: + console = Console(file=f, width=500) + console.print(table) + + table = Table("kind", "object", Column(header="message"), title="Events") + events = kubectl.CoreV1Api.list_namespaced_event(namespace=namespace) + for event in events.items: + table.add_row( + event.involved_object.kind, event.involved_object.name, event.message + ) + + with open(dump_events_path, "w") as f: + console = Console(file=f, width=500) + console.print(table) + + except k8_api_exception as err: + print(str(err)) + + +async def dump_pod_logs( + test_subpath, pod_name, container=None, namespace=NAMESPACE_NAME +): + """ + Receives test's subpath, k8s namespace, pod name-like and optionally container + Returns the logs of the pod + """ + + dump_path = Path(f"integration_tests/output/{test_subpath}/logs/") + dump_path.mkdir(parents=True, exist_ok=True) + filename = f"{pod_name}_{container}.txt" if container else f"{pod_name}.txt" + dump_path = dump_path / filename + kubectl = k8s_client.Kubectl() + + try: + namespace_pods = kubectl.CoreV1Api.list_namespaced_pod(namespace=namespace) + for pod in namespace_pods.items: + if pod_name in pod.metadata.name: + with open(dump_path, "w") as dump_file: + dump_file.write( + kubectl.CoreV1Api.read_namespaced_pod_log( + pod.metadata.name, namespace, container=container + ) + ) + except k8_api_exception: + print(f"{namespace}/{pod_name} not found.") diff --git a/src/core/api/app/integration_tests/workbench/base.py b/src/core/api/app/integration_tests/workbench/base.py new file mode 100644 index 00000000..ce076e8d --- /dev/null +++ b/src/core/api/app/integration_tests/workbench/base.py @@ -0,0 +1,114 @@ +import asyncio +import re +from enum import Enum + +from integration_tests.base_test import PlaywrightChannelsTestCase, utils_test + + +class WorkbenchServicesEnum(Enum): + AIRFLOW = "ORCHESTRATE" + AIRBYTE = "LOAD" + SUPERSET = "ANALYZE" + DOCS = "OBSERVE > Docs" + + +class WorkbenchBase(PlaywrightChannelsTestCase): + async def go_to_launchpad(self, email: str = None, password: str = None): + # Login + if email and password: + await utils_test.login( + page=self.page, + username=email, + password=password, + domain=self.domain, + ) + + await 
self.page.goto(f"https://{self.domain}/launchpad") + btn_open_env = self.page.get_by_role("button", name="Open", exact=True) + await btn_open_env.wait_for( + state="attached", timeout=self.get_timeout(minutes=5) + ) + await self.screenshot(delay=2) + + async def get_into_workbench(self, service: WorkbenchServicesEnum): + await self.go_to_launchpad() + btn_open_env = self.page.get_by_role("button", name="Open", exact=True) + await btn_open_env.wait_for( + state="attached", timeout=self.get_timeout(minutes=5) + ) + + # Wait for service pods to start + if service == WorkbenchServicesEnum.AIRBYTE: + time_sleep = 300 + else: + time_sleep = 240 + + await asyncio.sleep(time_sleep) # Increase if it's needed + await btn_open_env.click() + await self.page.frame_locator('iframe[name="docs"]').get_by_role( + "heading", name="Welcome to the Datacoves Documentation" + ).wait_for(state="attached", timeout=self.get_timeout(minutes=7)) + await self.screenshot() + + async def enable_project(self): + """Change Repo cloning to SSH, add key to Github, and test Project connection""" + await self.page.get_by_role("link", name="Projects").click() + await self.page.wait_for_selector( + "button.chakra-button.css-d99nyo", state="visible" + ) + project_edit_button = ( + self.page.get_by_role("cell", name="Test").get_by_role("button").nth(1) + ) + await project_edit_button.wait_for(state="visible") + await project_edit_button.wait_for(state="attached") + await self.screenshot() + await project_edit_button.evaluate("el => el.click()") + clone_strategy_combobox = self.page.get_by_role( + "combobox", name="Clone strategy" + ) + await clone_strategy_combobox.is_visible() + await clone_strategy_combobox.select_option("ssh_clone") + await self.screenshot() + develop_ssh = await self.page.get_by_text("ssh-ed25519").input_value() + await utils_test.github_ssh_key_create(self.ssh_key_title, develop_ssh) + btn_save = self.page.get_by_role("button", name="Save Changes") + await btn_save.wait_for(state="visible") + await btn_save.wait_for(state="attached") + await btn_save.evaluate("el => el.click()") + await self.screenshot() + await self.page.get_by_role("cell", name="connection template").wait_for( + state="attached", timeout=self.get_timeout(minutes=1) + ) + await self.screenshot() + print("Project successfully tested") + + async def enable_environment_service( + self, service: WorkbenchServicesEnum, needs_extra_config=False + ): + await self.page.get_by_role("link", name="Environments").click() + await self.page.get_by_role( + "row", name="Analytics Development 0 service connections" + ).get_by_role("button").first.click() + await self.page.get_by_role("tab", name="Stack Services").click() + + if service != WorkbenchServicesEnum.DOCS: + # Turn VSCode off we do not need it. 
+ await self.page.locator("div").filter( + has_text=re.compile( + r"^TRANSFORM \+ OBSERVE > Local DocsPowered by VS Code and dbt$" + ) + ).locator("span").nth(1).click() + + await self.page.get_by_role("group").filter(has_text=service.value).locator( + "span" + ).nth(1).click() + await self.screenshot() + await self.page.get_by_role("tab", name="General settings").click() + await self.page.get_by_label("dbt profile name").fill("default") + + if not needs_extra_config: + await self.page.get_by_role("button", name="Save Changes").click() + await self.page.get_by_role( + "row", name="Analytics Development 0 service connections" + ).wait_for(state="attached") + await self.screenshot() diff --git a/src/core/api/app/integration_tests/workbench/test_workbench_airbyte.py b/src/core/api/app/integration_tests/workbench/test_workbench_airbyte.py new file mode 100644 index 00000000..8f7103a7 --- /dev/null +++ b/src/core/api/app/integration_tests/workbench/test_workbench_airbyte.py @@ -0,0 +1,57 @@ +import pytest +from integration_tests.workbench.base import WorkbenchBase, WorkbenchServicesEnum +from playwright.async_api import expect + + +class WorkbenchAirbyteTest(WorkbenchBase): + def setUp(self): + self._subpath = f"workbench/{__name__}" + return super().setUp() + + @pytest.mark.asyncio + async def test_workbench_airbyte(self): + """ + Test that enables the Airbyte stack service on Environment page + and ensures Airbyte is available and accessible on the Workbench + + Steps: + 1. Login + 2. Go to Launchpad + 3. Go to Projects admin and enable Project by creating a SSH Key and testing it + 4. Go to Environments admin and toggle Airbyte stack service + 5. Go to Launchpad and Workbench + 6. Go to Airbyte + 7. Assert Airbyte landing content is present + """ + + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + await self.cluster_setup(user=user) + + await self.go_to_launchpad(user.email, user_session["password"]) + await self.enable_project() + await self.enable_environment_service(service=WorkbenchServicesEnum.AIRBYTE) + await self.go_to_load_tab() + print("Workbench Airbyte completed") + + except Exception: + pods = [ + ("pomerium",), + ("airbyte-server",), + ("airbyte-webapp",), + ("airbyte-worker",), + ] + await self.dump_pod_logs(pods=pods) + raise + + finally: + await self.asyncTearDown() + + async def go_to_load_tab(self): + await self.get_into_workbench(service=WorkbenchServicesEnum.AIRBYTE) + await self.page.goto(f"https://airbyte-tst001.{self.domain}") + await self.screenshot(delay=5) + await expect(self.page.get_by_text("Specify your preferences")).to_be_visible() diff --git a/src/core/api/app/integration_tests/workbench/test_workbench_airflow.py b/src/core/api/app/integration_tests/workbench/test_workbench_airflow.py new file mode 100644 index 00000000..5a6f4c7f --- /dev/null +++ b/src/core/api/app/integration_tests/workbench/test_workbench_airflow.py @@ -0,0 +1,76 @@ +import pytest +from integration_tests.workbench.base import WorkbenchBase, WorkbenchServicesEnum +from playwright.async_api import expect + + +class WorkbenchAirflowTest(WorkbenchBase): + def setUp(self): + self._subpath = f"workbench/{__name__}" + return super().setUp() + + @pytest.mark.asyncio + async def test_workbench_airflow(self): + """ + Test that enables the Airflow stack service on Environment page + and ensures Airflow is available and accessible on the Workbench + + Steps: + 1. Login + 2. Go to Launchpad + 3. 
Go to Projects admin and enable Project by creating a SSH Key and testing it + 4. Go to Environments admin and toggle Airflow stack service + 5. Go to Launchpad and Workbench + 6. Go to Airflow and 'sign in with Datacoves' + 7. Assert Airflow landing content is present + """ + + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + await self.cluster_setup(user=user) + + await self.go_to_launchpad(user.email, user_session["password"]) + await self.enable_project() + await self.enable_environment_service( + service=WorkbenchServicesEnum.AIRFLOW, needs_extra_config=True + ) + await self.configure_airflow_stack() + await self.go_to_orchestrate_tab() + print("Workbench Airflow completed") + + except Exception: + pods = [ + ("pomerium",), + ("airflow-webserver",), + ("airflow-scheduler", "s3-sync"), + ("airflow-scheduler", "scheduler"), + ("airflow-postgresql",), + ] + await self.dump_pod_logs(pods=pods) + raise + + finally: + await self.asyncTearDown() + + async def configure_airflow_stack(self): + await self.page.get_by_role("tab", name="Airflow settings").click() + await self.page.locator('[id="airflow_config\\.dags_source"]').select_option( + "git" + ) + await self.page.get_by_label("Git branch name").fill("main") + await self.screenshot(full_page=True) + await self.page.get_by_role("button", name="Save Changes").click() + await self.page.get_by_role( + "row", name="Analytics Development 0 service connections" + ).wait_for(state="attached") + await self.screenshot() + + async def go_to_orchestrate_tab(self): + await self.get_into_workbench(service=WorkbenchServicesEnum.AIRFLOW) + await self.page.goto(f"https://airflow-tst001.{self.domain}") + await self.screenshot() + await self.page.get_by_text("Sign In with datacoves").click() + await self.screenshot(delay=5) + await expect(self.page.get_by_role("heading", name="DAGs")).to_be_visible() diff --git a/src/core/api/app/integration_tests/workbench/test_workbench_airflow_s3.py b/src/core/api/app/integration_tests/workbench/test_workbench_airflow_s3.py new file mode 100644 index 00000000..6482a8a2 --- /dev/null +++ b/src/core/api/app/integration_tests/workbench/test_workbench_airflow_s3.py @@ -0,0 +1,94 @@ +import pytest +from integration_tests.workbench.base import ( + WorkbenchBase, + WorkbenchServicesEnum, + utils_test, +) +from playwright.async_api import expect + + +class WorkbenchAirflowWithS3Test(WorkbenchBase): + def setUp(self): + self._subpath = f"workbench/{__name__}" + return super().setUp() + + @pytest.mark.skip( + reason="No way of currently testing this, it's always failing on Github Action" + ) + @pytest.mark.asyncio + async def test_workbench_airflow_with_s3(self): + """ + Test that enables the Airflow stack service on Environment page + and ensures Airflow is available and accessible on the Workbench + + Steps: + 1. Login + 2. Go to Launchpad + 3. Go to Projects admin and enable Project by creating a SSH Key and testing it + 4. Go to Environments admin and toggle Airflow stack service + 5. Go to Launchpad and Workbench + 6. Go to Airflow and 'sign in with Datacoves' + 7. 
Assert Airflow landing content is present + """ + + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + await self.cluster_setup(user=user) + + await self.go_to_launchpad(user.email, user_session["password"]) + await self.enable_project() + await self.enable_environment_service( + service=WorkbenchServicesEnum.AIRFLOW, needs_extra_config=True + ) + await self.configure_airflow_stack() + await self.go_to_orchestrate_tab() + print("Workbench Airflow with S3 completed") + + except Exception: + pods = [ + ("pomerium",), + ("airflow-webserver",), + ("airflow-scheduler", "s3-sync"), + ("airflow-scheduler", "scheduler"), + ("airflow-postgresql",), + ] + await self.dump_pod_logs(pods=pods) + raise + + finally: + await self.asyncTearDown() + + async def configure_airflow_stack(self): + await self.page.get_by_role("tab", name="Airflow settings").click() + await self.page.locator('[id="airflow_config\\.dags_source"]').select_option( + "s3" + ) + await self.page.page.get_by_label("Bucket path*").fill( + utils_test.secrets["airflow_s3_sync"]["bucket"] + ) + await self.page.get_by_role("combobox", name="Auth mechanism").select_option( + "iam-user" + ) + await self.page.get_by_label("Access key*").fill( + utils_test.secrets["airflow_s3_sync"]["access_key"] + ) + await self.page.get_by_label("Secret key*").fill( + utils_test.secrets["airflow_s3_sync"]["secret_key"] + ) + await self.screenshot() + await self.page.get_by_role("button", name="Save Changes").click() + await self.page.get_by_role( + "row", name="Analytics Development 0 service connections" + ).wait_for(state="attached") + await self.screenshot() + + async def go_to_orchestrate_tab(self): + await self.get_into_workbench(service=WorkbenchServicesEnum.AIRFLOW) + await self.page.goto(f"https://airflow-tst001.{self.domain}") + await self.screenshot() + await self.page.get_by_text("Sign In with datacoves").click() + await self.screenshot(delay=5) + await expect(self.page.get_by_role("heading", name="DAGs")).to_be_visible() diff --git a/src/core/api/app/integration_tests/workbench/test_workbench_docs.py b/src/core/api/app/integration_tests/workbench/test_workbench_docs.py new file mode 100644 index 00000000..1df60803 --- /dev/null +++ b/src/core/api/app/integration_tests/workbench/test_workbench_docs.py @@ -0,0 +1,133 @@ +import asyncio + +import pytest +from integration_tests.user_settings.base import git_ssh_key_test_connection +from integration_tests.workbench.base import ( + WorkbenchBase, + WorkbenchServicesEnum, + utils_test, +) +from playwright.async_api import expect + +data = { + "git_repo_ssh_url": "git@github.com:datacoves/balboa.git", + "db_name": "test_db_name", + "db_conn_template": "1", + "db_auth_type": "key", + "db_user": utils_test.secrets["snowflake_service_account"]["template_db_user"], + "db_password": utils_test.secrets["snowflake_service_account"][ + "template_db_password" + ], +} + + +class WorkbenchDocsTest(WorkbenchBase): + def setUp(self): + self._subpath = f"workbench/{__name__}" + return super().setUp() + + @pytest.mark.asyncio + async def test_workbench_docs(self): + """ + Test that ensures Docs are accessible in the Workbench + + Steps: + 1. Login + 2. Go to Launchpad + 3. Go to Projects admin and enable Project by creating a SSH Key and testing it + 5. Go to Launchpad and Workbench + 6. Go to Observe + 7. 
Assert both docs and local-dbt-docs are visible + """ + + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + await self.cluster_setup(user=user) + + await self.go_to_launchpad(user.email, user_session["password"]) + + btn_open_user_menu = self.page.get_by_role("button", name="Open user menu") + await btn_open_user_menu.wait_for(state="attached") + await btn_open_user_menu.evaluate("el => el.click()") + await self.page.get_by_role("menuitem", name="Settings").click() + await self.page.get_by_role("heading", name="Profile Settings").wait_for() + await self.screenshot() + + await self.git_ssh_key_gen() + await self.enable_project() + await self.enable_environment_service( + service=WorkbenchServicesEnum.DOCS, needs_extra_config=True + ) + await self.configure_docs_stack() + await self.go_to_observe_tab() + print("Workbench Docs completed") + + except Exception: + pods = [ + ("pomerium",), + ("airflow-webserver",), + ("airflow-scheduler", "s3-sync"), + ("airflow-scheduler", "scheduler"), + ("airflow-postgresql",), + ] + await self.dump_pod_logs(pods=pods) + raise + + finally: + await self.asyncTearDown() + + async def git_ssh_key_gen(self): + """Test generate ssh key to clone git repository""" + await self.page.get_by_role("tab", name="Git SSH Keys").click() + await self.page.locator("section").filter( + has_text="Git SSH keysAdd this SSH key to your git server account to clone your repos." + ).get_by_role("button", name="Add").click() + await self.page.get_by_role("menuitem", name="Auto-generate key pairs").click() + await self.clean_toast() + await self.page.get_by_role("button", name="COPY").click() + await expect( + self.page.get_by_text("SSH key copied to clipboard") + ).to_be_visible() + await self.clean_toast() + await git_ssh_key_test_connection( + page=self.page, + git_repo_ssh_url=data["git_repo_ssh_url"], + is_success=True, + ssh_key_title=self.ssh_key_title, + ) + await self.page.screenshot() + await self.clean_toast() + await self.page.goto(f"https://{self.domain}/launchpad") + + async def configure_docs_stack(self): + await self.page.get_by_role("tab", name="Docs settings").click() + await self.page.get_by_label("Git branch name*").fill("dbt-docs") + await self.screenshot() + await self.page.get_by_role("button", name="Save Changes").click() + await self.page.get_by_role( + "row", name="Analytics Development 0 service connections" + ).wait_for(state="attached") + await self.screenshot() + + async def go_to_observe_tab(self): + await self.get_into_workbench(service=WorkbenchServicesEnum.DOCS) + + # TO avoid the error: Nightly Can't Open This Page (Pomerium is not ready) + await asyncio.sleep(120) + await self.page.get_by_text("Observe").click() + await expect( + self.page.frame_locator('iframe[name="observe"]').get_by_role( + "heading", name="DBT Docs not generated yet" + ) + ).to_be_visible() + await self.screenshot() + await self.page.get_by_role("button", name="Docs", exact=True).click() + await expect( + self.page.frame_locator('iframe[name="observe"]').get_by_role( + "heading", name="Datacoves Demo" + ) + ).to_be_visible() + await self.screenshot() diff --git a/src/core/api/app/integration_tests/workbench/test_workbench_superset.py b/src/core/api/app/integration_tests/workbench/test_workbench_superset.py new file mode 100644 index 00000000..1e14dec6 --- /dev/null +++ b/src/core/api/app/integration_tests/workbench/test_workbench_superset.py @@ -0,0 +1,62 @@ +import pytest +from integration_tests.workbench.base import 
WorkbenchBase, WorkbenchServicesEnum +from playwright.async_api import expect + + +class WorkbenchSupersetTest(WorkbenchBase): + def setUp(self): + self._subpath = f"workbench/{__name__}" + return super().setUp() + + @pytest.mark.asyncio + async def test_workbench_superset(self): + """ + Test that enables the Superset stack service on Environment page + and ensures Superset is available and accessible on the Workbench + + Steps: + 1. Login + 2. Go to Launchpad + 3. Go to Projects admin and enable Project by creating a SSH Key and testing it + 4. Go to Environments admin and toggle Superset stack service + 5. Go to Launchpad and Workbench + 6. Go to Superset and 'sign in with Datacoves' + 7. Assert Superset landing content is present + """ + + try: + await self.asyncSetUp() + + user_session = await self.user_session() + user = user_session["user"] + await self.cluster_setup(user=user) + + await self.go_to_launchpad(user.email, user_session["password"]) + await self.enable_project() + await self.enable_environment_service( + service=WorkbenchServicesEnum.SUPERSET + ) + await self.go_to_analyze_tab() + print("Workbench Superset completed") + + except Exception: + pods = [ + ("pomerium",), + ("superset-init-db",), + ("superset",), + ] + await self.dump_pod_logs(pods=pods) + raise + + finally: + await self.asyncTearDown() + + async def go_to_analyze_tab(self): + await self.get_into_workbench(service=WorkbenchServicesEnum.SUPERSET) + await self.page.goto(f"https://superset-tst001.{self.domain}") + await self.screenshot() + await self.page.get_by_text("Sign In with datacoves").click() + await self.screenshot(delay=5) + await expect( + self.page.get_by_text("Other dashboards will appear here") + ).to_be_visible(timeout=self.get_timeout(minutes=2)) diff --git a/src/core/api/app/integrations/__init__.py b/src/core/api/app/integrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/integrations/admin.py b/src/core/api/app/integrations/admin.py new file mode 100644 index 00000000..ffb42f18 --- /dev/null +++ b/src/core/api/app/integrations/admin.py @@ -0,0 +1,17 @@ +from core.fields import EncryptedJSONField +from django.contrib import admin +from django_json_widget.widgets import JSONEditorWidget + +from datacoves.admin import BaseModelAdmin + +from .models import Integration + + +@admin.register(Integration) +class IntegrationAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ("name", "account", "type", "is_default") + list_filter = ("type",) + search_fields = ("name", "account__name") + formfield_overrides = { + EncryptedJSONField: {"widget": JSONEditorWidget}, + } diff --git a/src/core/api/app/integrations/apps.py b/src/core/api/app/integrations/apps.py new file mode 100644 index 00000000..3e16fca9 --- /dev/null +++ b/src/core/api/app/integrations/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class IntegrationsConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "integrations" diff --git a/src/core/api/app/integrations/migrations/0001_initial.py b/src/core/api/app/integrations/migrations/0001_initial.py new file mode 100644 index 00000000..8e885fd3 --- /dev/null +++ b/src/core/api/app/integrations/migrations/0001_initial.py @@ -0,0 +1,39 @@ +# Generated by Django 3.2.6 on 2022-09-28 15:55 + +import autoslug.fields +import core.fields +import django.db.models.deletion +import integrations.models +from django.conf import settings +from django.db import migrations, models + + +class 
Migration(migrations.Migration): + + initial = True + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('users', '0003_user_slug'), + ] + + operations = [ + migrations.CreateModel( + name='Integration', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('name', models.CharField(max_length=250)), + ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from=integrations.models.integration_slug, unique=True)), + ('type', models.CharField(choices=[('smtp', 'SMTP'), ('msteams', 'MS Teams'), ('slack', 'Slack')], max_length=50)), + ('settings', core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True)), + ('validated_at', models.DateTimeField(blank=True, null=True)), + ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.account')), + ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='created_integrations', to=settings.AUTH_USER_MODEL)), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/src/core/api/app/integrations/migrations/0002_alter_integration_type.py b/src/core/api/app/integrations/migrations/0002_alter_integration_type.py new file mode 100644 index 00000000..30d4f08c --- /dev/null +++ b/src/core/api/app/integrations/migrations/0002_alter_integration_type.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-10-06 21:29 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('integrations', '0001_initial'), + ] + + operations = [ + migrations.AlterField( + model_name='integration', + name='type', + field=models.CharField(choices=[('smtp', 'SMTP'), ('msteams', 'MS Teams'), ('slack', 'Slack'), ('sentry', 'Sentry')], max_length=50), + ), + ] diff --git a/src/core/api/app/integrations/migrations/0003_alter_integration_settings.py b/src/core/api/app/integrations/migrations/0003_alter_integration_settings.py new file mode 100644 index 00000000..3091b7ad --- /dev/null +++ b/src/core/api/app/integrations/migrations/0003_alter_integration_settings.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.20 on 2024-07-01 21:18 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('integrations', '0002_alter_integration_type'), + ] + + operations = [ + migrations.AlterField( + model_name='integration', + name='settings', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='This is a JSON dictionary with settings. 
Right now, only INTEGRATION_TYPE_SMTP settings are validated.', null=True), + ), + ] diff --git a/src/core/api/app/integrations/migrations/0004_integration_is_default.py b/src/core/api/app/integrations/migrations/0004_integration_is_default.py new file mode 100644 index 00000000..1ac2cdd7 --- /dev/null +++ b/src/core/api/app/integrations/migrations/0004_integration_is_default.py @@ -0,0 +1,21 @@ +# Generated by Django 5.0.7 on 2025-02-17 13:09 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("integrations", "0003_alter_integration_settings"), + ] + + operations = [ + migrations.AddField( + model_name="integration", + name="is_default", + field=models.BooleanField( + default=False, + help_text="Default integrations are automatically added to new Environments.", + ), + ), + ] diff --git a/src/core/api/app/integrations/migrations/__init__.py b/src/core/api/app/integrations/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/integrations/models/__init__.py b/src/core/api/app/integrations/models/__init__.py new file mode 100644 index 00000000..a61de25d --- /dev/null +++ b/src/core/api/app/integrations/models/__init__.py @@ -0,0 +1 @@ +from .integration import * # noqa: F401,F403 diff --git a/src/core/api/app/integrations/models/integration.py b/src/core/api/app/integrations/models/integration.py new file mode 100644 index 00000000..91f1c9ec --- /dev/null +++ b/src/core/api/app/integrations/models/integration.py @@ -0,0 +1,108 @@ +from autoslug import AutoSlugField +from core.fields import EncryptedJSONField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings as django_settings +from django.core.exceptions import ValidationError +from django.db import models +from users.models import Account + +from ..validation import validate_smtp_settings + + +def integration_slug(instance): + return f"{instance.name}-{instance.account.slug}" + + +class Integration(AuditModelMixin, DatacovesModel): + """Integration setting storage model + + This is for storing settings to our supported integrations (see constants + below). Additionally, it stores 'validated_at' which is an important + field which indicates if we have validated the settings or not. + + SMTP settings have validation (see validate_smtp_settings), the others + do not at this time. + + ========= + Constants + ========= + + - INTEGRATION_TYPE_SMTP + - INTEGRATION_TYPE_MSTEAMS + - INTEGRATION_TYPE_SLACK + - INTEGRATION_TYPE_SLACK + - INTEGRATION_TYPES - a tuple of tuple pairs for populating select boxes + + ======= + Methods + ======= + + - **clean()** - Private method to run validation + - **save(...)** - Overriden save to run 'clean'. + """ + + INTEGRATION_TYPE_SMTP = "smtp" + INTEGRATION_TYPE_MSTEAMS = "msteams" + INTEGRATION_TYPE_SLACK = "slack" + INTEGRATION_TYPE_SENTRY = "sentry" + INTEGRATION_TYPES = ( + (INTEGRATION_TYPE_SMTP, "SMTP"), + (INTEGRATION_TYPE_MSTEAMS, "MS Teams"), + (INTEGRATION_TYPE_SLACK, "Slack"), + (INTEGRATION_TYPE_SENTRY, "Sentry"), + ) + + name = models.CharField(max_length=250) + account = models.ForeignKey(Account, on_delete=models.CASCADE) + slug = AutoSlugField(populate_from=integration_slug, unique=True) + type = models.CharField(max_length=50, choices=INTEGRATION_TYPES) + settings = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="This is a JSON dictionary with settings. 
Right now, " + "only INTEGRATION_TYPE_SMTP settings are validated.", + ) + validated_at = models.DateTimeField(blank=True, null=True) + + created_by = models.ForeignKey( + django_settings.AUTH_USER_MODEL, + on_delete=models.SET_NULL, + related_name="created_integrations", + blank=True, + null=True, + ) + is_default = models.BooleanField( + default=False, + help_text="Default integrations are automatically added to new Environments.", + ) + + def __str__(self): + return f"{self.type}:{self.name}" + + def clean(self): + """Validate settings if we can - raises ValidationError if needed""" + if self.type == self.INTEGRATION_TYPE_SMTP: + validate_smtp_settings(self.settings) + # There can only be 1 default SMTP integration + if self.is_default and self.__class__.objects.filter( + type=self.INTEGRATION_TYPE_SMTP, is_default=True + ).exclude(pk=self.pk): + raise ValidationError("There can only be one default SMTP integration") + + def save(self, *args, **kwargs): + """Wrapper around save to run the validation call""" + self.clean() + return super().save(*args, **kwargs) + + @property + def is_notification(self): + return self.type in [ + self.INTEGRATION_TYPE_MSTEAMS, + self.INTEGRATION_TYPE_SLACK, + ] + + @property + def is_system(self): + return self.created_by is None diff --git a/src/core/api/app/integrations/permissions.py b/src/core/api/app/integrations/permissions.py new file mode 100644 index 00000000..1faf2936 --- /dev/null +++ b/src/core/api/app/integrations/permissions.py @@ -0,0 +1,10 @@ +from clusters.request_utils import get_cluster +from rest_framework import permissions + + +class IsIntegrationsAdminEnabled(permissions.BasePermission): + message = "Integrations admin feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + return features["admin_integrations"] or features["admin_environments"] diff --git a/src/core/api/app/integrations/serializers.py b/src/core/api/app/integrations/serializers.py new file mode 100644 index 00000000..8ca7b87d --- /dev/null +++ b/src/core/api/app/integrations/serializers.py @@ -0,0 +1,21 @@ +from rest_framework import serializers +from users.models import Account + +from .models import Integration + + +class IntegrationSerializer(serializers.ModelSerializer): + class Meta: + model = Integration + fields = ["id", "name", "type", "settings", "is_notification", "is_default"] + + def create(self, validated_data): + validated_data["account"] = Account.objects.get(slug=self.context["account"]) + validated_data["created_by"] = self.context["request"].user + return super().create(validated_data) + + def to_representation(self, instance): + rep = super().to_representation(instance) + if "password" in rep["settings"]: + del rep["settings"]["password"] + return rep diff --git a/src/core/api/app/integrations/validation.py b/src/core/api/app/integrations/validation.py new file mode 100644 index 00000000..f87bea38 --- /dev/null +++ b/src/core/api/app/integrations/validation.py @@ -0,0 +1,24 @@ +from typing import Optional + +from django.core.exceptions import ValidationError +from pydantic import BaseModel, EmailStr +from pydantic import ValidationError as PydanticValidationError + + +class SMTPSettings(BaseModel): + server: str + host: str + port: int + mail_from: EmailStr + user: Optional[str] + password: Optional[str] + ssl: Optional[bool] = False + start_tls: Optional[bool] = True + + +def validate_smtp_settings(settings): + if settings.get("server") == "custom": + try: + SMTPSettings(**settings) + 
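+         # Constructing the pydantic model is enough to run the field checks
+         # (required host/port/mail_from, EmailStr parsing); any failure is
+         # re-raised below as a Django ValidationError.
+         # A settings dict that would pass this validation (values illustrative):
+         #   {"server": "custom", "host": "smtp.example.com", "port": 587,
+         #    "mail_from": "noreply@example.com", "user": "mailer",
+         #    "password": "***", "start_tls": True}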
except PydanticValidationError as ex: + raise ValidationError(ex) diff --git a/src/core/api/app/integrations/views.py b/src/core/api/app/integrations/views.py new file mode 100644 index 00000000..0b39f881 --- /dev/null +++ b/src/core/api/app/integrations/views.py @@ -0,0 +1,65 @@ +from core.mixins.views import ( + AddAccountToContextMixin, + VerboseCreateModelMixin, + VerboseUpdateModelMixin, +) +from django.core.exceptions import ValidationError +from django_filters.rest_framework import DjangoFilterBackend +from iam.permissions import HasResourcePermission +from rest_framework import filters, generics, status +from rest_framework.permissions import IsAuthenticated +from rest_framework.response import Response + +from .models import Integration +from .permissions import IsIntegrationsAdminEnabled +from .serializers import IntegrationSerializer + + +class IntegrationMixin(AddAccountToContextMixin): + serializer_class = IntegrationSerializer + permission_classes = [ + IsAuthenticated, + HasResourcePermission, + IsIntegrationsAdminEnabled, + ] + + def get_queryset(self): + return Integration.objects.filter( + account__slug=self.kwargs.get("account_slug") + ).order_by("name") + + +class IntegrationList( + IntegrationMixin, + VerboseCreateModelMixin, + generics.ListCreateAPIView, +): + filter_backends = [filters.SearchFilter, DjangoFilterBackend] + search_fields = ["name"] + + +class IntegrationDetail( + IntegrationMixin, + VerboseUpdateModelMixin, + generics.RetrieveUpdateDestroyAPIView, +): + def perform_destroy(self, instance): + """Prevent deletion of default Integrations""" + if instance.is_default: + return Response( + "Default integrations cannot be deleted", + status=status.HTTP_400_BAD_REQUEST, + ) + else: + try: + super().perform_destroy(instance) + return Response(status=status.HTTP_204_NO_CONTENT) + except ValidationError as ex: + return Response( + ex.message, + status=status.HTTP_400_BAD_REQUEST, + ) + + def destroy(self, request, *args, **kwargs): + instance = self.get_object() + return self.perform_destroy(instance) diff --git a/src/core/api/app/invitations/__init__.py b/src/core/api/app/invitations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/invitations/admin.py b/src/core/api/app/invitations/admin.py new file mode 100644 index 00000000..17accc96 --- /dev/null +++ b/src/core/api/app/invitations/admin.py @@ -0,0 +1,17 @@ +from django.contrib import admin +from django.db import models +from django_json_widget.widgets import JSONEditorWidget + +from datacoves.admin import BaseModelAdmin + +from .models import Invitation + + +@admin.register(Invitation) +class InvitationAdmin(BaseModelAdmin, admin.ModelAdmin): + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + list_display = ("name", "account", "email", "inviter", "user", "accepted_at") + search_fields = ("name", "account__name", "email", "inviter__name") + list_filter = ("account",) diff --git a/src/core/api/app/invitations/apps.py b/src/core/api/app/invitations/apps.py new file mode 100644 index 00000000..4ffda890 --- /dev/null +++ b/src/core/api/app/invitations/apps.py @@ -0,0 +1,9 @@ +from django.apps import AppConfig + + +class InvitationsConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "invitations" + + def ready(self): + pass diff --git a/src/core/api/app/invitations/migrations/0001_initial.py b/src/core/api/app/invitations/migrations/0001_initial.py new file mode 100644 index 00000000..b6e193a3 --- /dev/null +++ 
b/src/core/api/app/invitations/migrations/0001_initial.py @@ -0,0 +1,29 @@ +# Generated by Django 3.2.6 on 2022-03-03 16:08 + +from django.db import migrations, models +import invitations.models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='Invitation', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('accepted_at', models.DateTimeField(blank=True, null=True, verbose_name='accepted')), + ('key', models.CharField(default=invitations.models.get_invitation_key, max_length=64, unique=True, verbose_name='key')), + ('sent_at', models.DateTimeField(blank=True, null=True, verbose_name='sent')), + ('attempts', models.PositiveIntegerField(default=0)), + ('email', models.EmailField(max_length=254, verbose_name='e-mail address')), + ('name', models.CharField(max_length=130)), + ], + ), + ] diff --git a/src/core/api/app/invitations/migrations/0002_initial.py b/src/core/api/app/invitations/migrations/0002_initial.py new file mode 100644 index 00000000..52ecd038 --- /dev/null +++ b/src/core/api/app/invitations/migrations/0002_initial.py @@ -0,0 +1,44 @@ +# Generated by Django 3.2.6 on 2022-03-03 16:08 + +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('invitations', '0001_initial'), + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('users', '0001_initial'), + ('auth', '0012_alter_user_first_name_max_length'), + ] + + operations = [ + migrations.AddField( + model_name='invitation', + name='account', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.account'), + ), + migrations.AddField( + model_name='invitation', + name='groups', + field=models.ManyToManyField(blank=True, help_text='The groups this user will belongs to. 
A user will get all permissions granted to each of their groups.', related_name='invitations', to='auth.Group', verbose_name='groups'), + ), + migrations.AddField( + model_name='invitation', + name='inviter', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='created_invitations', to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='invitation', + name='user', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='received_invitations', to=settings.AUTH_USER_MODEL), + ), + migrations.AddConstraint( + model_name='invitation', + constraint=models.UniqueConstraint(fields=('account', 'email'), name='Account invited email uniqueness'), + ), + ] diff --git a/src/core/api/app/invitations/migrations/0003_remove_invitation_account_invited_email_uniqueness.py b/src/core/api/app/invitations/migrations/0003_remove_invitation_account_invited_email_uniqueness.py new file mode 100644 index 00000000..62bbd9d2 --- /dev/null +++ b/src/core/api/app/invitations/migrations/0003_remove_invitation_account_invited_email_uniqueness.py @@ -0,0 +1,17 @@ +# Generated by Django 3.2.6 on 2022-11-18 17:19 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('invitations', '0002_initial'), + ] + + operations = [ + migrations.RemoveConstraint( + model_name='invitation', + name='Account invited email uniqueness', + ), + ] diff --git a/src/core/api/app/invitations/migrations/0004_alter_invitation_key.py b/src/core/api/app/invitations/migrations/0004_alter_invitation_key.py new file mode 100644 index 00000000..fa46bdf1 --- /dev/null +++ b/src/core/api/app/invitations/migrations/0004_alter_invitation_key.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.20 on 2024-07-01 21:18 + +from django.db import migrations, models +import invitations.models.invitation + + +class Migration(migrations.Migration): + + dependencies = [ + ('invitations', '0003_remove_invitation_account_invited_email_uniqueness'), + ] + + operations = [ + migrations.AlterField( + model_name='invitation', + name='key', + field=models.CharField(default=invitations.models.invitation.get_invitation_key, help_text='The invitation key used to accept the invitation.', max_length=64, unique=True, verbose_name='key'), + ), + ] diff --git a/src/core/api/app/invitations/migrations/__init__.py b/src/core/api/app/invitations/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/invitations/models/__init__.py b/src/core/api/app/invitations/models/__init__.py new file mode 100644 index 00000000..bcad5e19 --- /dev/null +++ b/src/core/api/app/invitations/models/__init__.py @@ -0,0 +1 @@ +from .invitation import * # noqa: F401,F403 diff --git a/src/core/api/app/invitations/models/invitation.py b/src/core/api/app/invitations/models/invitation.py new file mode 100644 index 00000000..5179ba02 --- /dev/null +++ b/src/core/api/app/invitations/models/invitation.py @@ -0,0 +1,250 @@ +import logging +from datetime import timedelta + +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.contrib.auth.models import Group +from django.db import models +from django.db.models import Q +from django.forms import ValidationError +from django.urls import reverse +from django.utils import timezone +from django.utils.crypto import get_random_string +from django.utils.translation import gettext_lazy as _ +from users.models 
import Account, ExtendedGroup, User + +from ..services import EmailSender + +logger = logging.getLogger(__name__) + + +class InvitationManager(models.Manager): + def expired(self): + return self.filter(self._expired_and_accepted_q()) + + def valid(self): + return self.exclude(self._expired_and_accepted_q()) + + def _expired_and_accepted_q(self): + sent_threshold = timezone.now() - timedelta( + days=settings.INVITATION_EXPIRY_DAYS + ) + # accepted and sent more than expiry days ago + q = Q(accepted_at__isnull=False) | Q(sent_at__lt=sent_threshold) + return q + + def remove_expired_for(self, account_slug: str, email: str): + """Removes expired invitations for an account_slug and email""" + sent_threshold = timezone.now() - timedelta( + days=settings.INVITATION_EXPIRY_DAYS + ) + + self.filter( + account__slug=account_slug, + email=email.lower(), + accepted_at__isnull=True, + sent_at__lt=sent_threshold, + ).delete() + + +def get_invitation_key(): + return get_random_string(64).lower() + + +class Invitation(AuditModelMixin, DatacovesModel): + """Invitation for new users + + This stores information about the entire lifecycle of an invitation to + the user to join the system, including expirations. It uses its own + custom manager that provides 'expired', 'valid', and 'removed_expired_for' + query features for easily looking up Invitations. + + Expiration is controlled by settings.INVITATION_EXPIRY_DAYS + + You can only re-send invitations up to settings.INVITATION_MAX_ATTEMPTS + times. + + Emails are stored lower-case only. + + ========= + Constants + ========= + + - STATUS_ACCEPTED + - STATUS_EXPIRED + - STATUS_PENDING + - STATUS_CREATED + + The difference between STATUS_CREATED and STATUS_PENDING is that + STATUS_CREATED hasn't yet been sent via email. + + ======= + Methods + ======= + + - **key_expired()** - returns True is key is expired + - **was_accepted()** - returns True if invitation was accepted + - **accept()** - Performs all actions needed when invitation is accepted + - **can_send_invitation()** - True if one can send the invitation again + - **send_invitation(request, ...)** - Sends the invitation. Raises + ValidationError if can_send_invitation would return False. kwargs + are used as defaults for the context that is sent to the email template, + and thus can be used for additional template variables. It will + be overriden by invite_url, account_name, email, name, key, inviter + from the local object. This makes passing kwargs pretty meaningless + because the email template is always the same and the keys set by the + model cannot be overridden by kwargs, but we could improve + this in the future if we needed more dynamic templates/multiple + templates. 
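+     - **status** - property that returns one of the STATUS_* constants above,
+       derived from whether the invitation has been sent, accepted, or expired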
+ - **save(...)** - lowercases all emails before allowing a save + """ + + STATUS_ACCEPTED = "accepted" + STATUS_EXPIRED = "expired" + STATUS_PENDING = "pending" + STATUS_CREATED = "created" + + accepted_at = models.DateTimeField( + verbose_name=_("accepted"), null=True, blank=True + ) + key = models.CharField( + verbose_name=_("key"), + max_length=64, + unique=True, + default=get_invitation_key, + help_text="The invitation key used to accept the invitation.", + ) + sent_at = models.DateTimeField(verbose_name=_("sent"), null=True, blank=True) + attempts = models.PositiveIntegerField(default=0) + inviter = models.ForeignKey( + settings.AUTH_USER_MODEL, + on_delete=models.CASCADE, + related_name="created_invitations", + ) + account = models.ForeignKey(Account, on_delete=models.CASCADE) + groups = models.ManyToManyField( + Group, + verbose_name=_("groups"), + blank=True, + help_text=_( + "The groups this user will belongs to. A user will get all permissions " + "granted to each of their groups." + ), + related_name="invitations", + ) + email = models.EmailField(verbose_name=_("e-mail address")) + name = models.CharField(max_length=130) + user = models.ForeignKey( + settings.AUTH_USER_MODEL, + on_delete=models.CASCADE, + related_name="received_invitations", + null=True, + blank=True, + ) + + objects = InvitationManager() + + @property + def status(self) -> str: + """Returns the status of the invitation""" + status = self.STATUS_CREATED + if self.sent_at: + status = self.STATUS_PENDING + if self.accepted_at: + status = self.STATUS_ACCEPTED + elif self.key_expired(): + status = self.STATUS_EXPIRED + return status + + def key_expired(self) -> bool: + """Returns if the key is expired or not. Key expiration is managed + by settings.INVITATION_EXPIRY_DAYS + """ + + if not self.sent_at: + return False + expiration_date = self.sent_at + timedelta(days=settings.INVITATION_EXPIRY_DAYS) + return expiration_date <= timezone.now() + + def was_accepted(self) -> bool: + """True if the invitation was accepted""" + return self.accepted_at is not None + + def accept(self) -> True: + """Accepts the invitation and sets necessary fields. Creates the + new user if needed, associates them with the account, and adds any + groups needed to the user object. + """ + + try: + account_group = Group.objects.get( + extended_group__role=ExtendedGroup.Role.ROLE_DEFAULT, + extended_group__account__slug=self.account, + ) + except Group.DoesNotExist: + logging.error( + "Group does not exists: %s - %s", + self.account.name, + ExtendedGroup.Role.ROLE_DEFAULT, + ) + return False + + self.user, _ = User.objects.get_or_create( + email=self.email, defaults={"name": self.name} + ) + self.accepted_at = timezone.now() + self.save() + self.user.groups.add(account_group) + for group in self.groups.all(): + self.user.groups.add(group) + + return True + + def can_send_invitation(self) -> bool: + """Only allow settings.INVITATION_MAX_ATTEMPTS attempts to send""" + return self.attempts < settings.INVITATION_MAX_ATTEMPTS + + def send_invitation(self, request, **kwargs): + """Sends an invitation. Can send a ValidationError if the + user is no longer allowed to send invitations to this email + due to too many attempts + + Requires a request object in order to build the URI for the + invitation email. + + Uses the 'invitations/email/email_invite' template. 
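+
+         A minimal usage sketch (names are illustrative; 'account' and
+         'request' are assumed to be in scope, as in the invitation
+         serializer):
+
+             invitation = Invitation.objects.create(
+                 account=account,
+                 inviter=request.user,
+                 email="new.user@example.com",
+                 name="New User",
+             )
+             invitation.send_invitation(request)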
+ """ + + if not self.can_send_invitation(): + raise ValidationError("Max attempts to send invitation has been reached.") + + invite_url = reverse("accept-invite", kwargs={"invite_key": self.key}) + invite_url = request.build_absolute_uri(invite_url) + ctx = kwargs + ctx.update( + { + "invite_url": invite_url, + "account_name": self.account.name, + "email": self.email, + "name": self.name, + "key": self.key, + "inviter": self.inviter, + } + ) + + email_template = "invitations/email/email_invite" + + EmailSender.send_mail(email_template, self.email, ctx) + self.attempts += 1 + self.sent_at = timezone.now() + self.save() + + def __str__(self): + return f"Invite: {self.email}" + + def save(self, *args, **kwargs): + """Enforce lower case emails""" + + self.email = self.email.lower() + super().save(*args, **kwargs) diff --git a/src/core/api/app/invitations/permissions.py b/src/core/api/app/invitations/permissions.py new file mode 100644 index 00000000..2beee802 --- /dev/null +++ b/src/core/api/app/invitations/permissions.py @@ -0,0 +1,10 @@ +from clusters.request_utils import get_cluster +from rest_framework import permissions + + +class IsInvitationsAdminEnabled(permissions.BasePermission): + message = "Invitations admin feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + return features["admin_invitations"] or features["admin_users"] diff --git a/src/core/api/app/invitations/serializers.py b/src/core/api/app/invitations/serializers.py new file mode 100644 index 00000000..f844d8d3 --- /dev/null +++ b/src/core/api/app/invitations/serializers.py @@ -0,0 +1,64 @@ +from django.forms import ValidationError +from invitations.models import Invitation +from rest_framework import serializers +from users.models import Account, User + +from .models import ExtendedGroup + + +class InvitationSerializer(serializers.ModelSerializer): + class Meta: + model = Invitation + fields = ("name", "email", "groups", "id", "status") + + def validate(self, attrs): + account_slug = self.context["account"] + email = attrs["email"] + existing_user = User.objects.filter(email__iexact=email).first() + if existing_user and existing_user.accounts.filter(slug=account_slug).first(): + raise ValidationError(f"User {email} is already a member of the account.") + if ( + Invitation.objects.valid() + .filter(account__slug=account_slug, email__iexact=email) + .count() + > 0 + ): + raise ValidationError(f"{email} already has a pending invitation.") + return attrs + + def create(self, validated_data): + request = self.context["request"] + account_slug = self.context["account"] + validated_data["inviter"] = request.user + validated_data["account"] = Account.objects.get(slug=account_slug) + Invitation.objects.remove_expired_for(account_slug, validated_data["email"]) + instance = super().create(validated_data=validated_data) + instance.send_invitation(request) + return instance + + def to_representation(self, instance): + """Returning group names to avoid an extra request on clients""" + data = super().to_representation(instance) + data["groups"] = [ + {"id": group.id, "name": group.extended_group.name} + for group in instance.groups.exclude( + extended_group__role=ExtendedGroup.Role.ROLE_DEFAULT + ) + ] + return data + + +class ResendInvitationSerializer(serializers.ModelSerializer): + class Meta: + model = Invitation + fields = ("status",) + + def validate(self, attrs): + if not self.instance.can_send_invitation(): + raise ValidationError("Max attempts to send invitation 
has been reached.") + return attrs + + def update(self, instance, validated_data): + request = self.context["request"] + instance.send_invitation(request) + return instance diff --git a/src/core/api/app/invitations/services.py b/src/core/api/app/invitations/services.py new file mode 100644 index 00000000..b7294947 --- /dev/null +++ b/src/core/api/app/invitations/services.py @@ -0,0 +1,63 @@ +from django.conf import settings +from django.core.mail import EmailMessage, EmailMultiAlternatives +from django.template import TemplateDoesNotExist +from django.template.loader import render_to_string +from django.utils.encoding import force_str + + +class EmailSender: + @classmethod + def _render_mail(cls, template_prefix, email, context, subject=None): + """ + Renders an e-mail to `email`. `template_prefix` identifies the + e-mail that is to be sent, e.g. "account/email/email_confirmation" + """ + if not subject: + subject = render_to_string(f"{template_prefix}_subject.txt", context) + # remove superfluous line breaks + subject = " ".join(subject.splitlines()).strip() + subject = force_str(subject) + if isinstance(email, str): + to = [email] + else: + to = email + + bodies = {} + for ext in ["html", "txt"]: + try: + template_name = f"{template_prefix}_message.{ext}" + bodies[ext] = render_to_string(template_name, context).strip() + except TemplateDoesNotExist: + if ext == "txt" and not bodies: + # We need at least one body + raise + msg = cls.create_mail_message( + subject, + to, + settings.DEFAULT_FROM_EMAIL, + body_txt=bodies.get("txt"), + body_html=bodies.get("html"), + ) + return msg + + @classmethod + def send_mail(cls, template_prefix, email, context, subject=None): + msg = cls._render_mail(template_prefix, email, context, subject=subject) + msg.send() + + @staticmethod + def create_mail_message( + subject, + to, + from_email=settings.DEFAULT_FROM_EMAIL, + body_txt=None, + body_html=None, + ): + if body_txt: + msg = EmailMultiAlternatives(subject, body_txt, from_email, to) + if body_html: + msg.attach_alternative(body_html, "text/html") + else: + msg = EmailMessage(subject, body_html, from_email, to) + msg.content_subtype = "html" # Main content is now text/html + return msg diff --git a/src/core/api/app/invitations/templates/invitations/email/email_invite_message.html b/src/core/api/app/invitations/templates/invitations/email/email_invite_message.html new file mode 100644 index 00000000..6b0e9d24 --- /dev/null +++ b/src/core/api/app/invitations/templates/invitations/email/email_invite_message.html @@ -0,0 +1,444 @@ +{% load i18n %} {% autoescape off %} {% blocktrans %} + + + + + + + + + + This is preheader text. Some clients will show this text as a + preview. + + + + + + + + + +{% endblocktrans %} {% endautoescape %} diff --git a/src/core/api/app/invitations/templates/invitations/email/email_invite_message.txt b/src/core/api/app/invitations/templates/invitations/email/email_invite_message.txt new file mode 100644 index 00000000..901e95c0 --- /dev/null +++ b/src/core/api/app/invitations/templates/invitations/email/email_invite_message.txt @@ -0,0 +1,12 @@ +{% load i18n %} +{% autoescape off %} +{% blocktrans %} + +Hello {{ name }}, + +You ({{ email }}) have been invited to join {{ account_name }} in Datacoves. 
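The subject and body files in this directory follow the naming that `EmailSender` above expects: `{prefix}_subject.txt`, `{prefix}_message.txt` and, optionally, `{prefix}_message.html`. A minimal, hedged sketch of a call that would pick them up; the recipient address and context values are illustrative:

```python
# Sketch only; the recipient and context values are placeholders.
from invitations.services import EmailSender

EmailSender.send_mail(
    "invitations/email/email_invite",   # template prefix, matches these files
    "new.user@example.com",
    {
        "name": "New User",
        "email": "new.user@example.com",
        "account_name": "Acme Analytics",
        "invite_url": "https://example.datacoves.com/accept/<key>",  # placeholder URL
    },
)
```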
+ +If you'd like to join, please go to {{ invite_url }} + +{% endblocktrans %} +{% endautoescape %} \ No newline at end of file diff --git a/src/core/api/app/invitations/templates/invitations/email/email_invite_subject.txt b/src/core/api/app/invitations/templates/invitations/email/email_invite_subject.txt new file mode 100644 index 00000000..6080e448 --- /dev/null +++ b/src/core/api/app/invitations/templates/invitations/email/email_invite_subject.txt @@ -0,0 +1,4 @@ +{% load i18n %} +{% autoescape off %} +{% blocktrans %}Join {{ account_name }} in using Datacoves{% endblocktrans %} +{% endautoescape %} \ No newline at end of file diff --git a/src/core/api/app/invitations/tests.py b/src/core/api/app/invitations/tests.py new file mode 100644 index 00000000..72b2ec74 --- /dev/null +++ b/src/core/api/app/invitations/tests.py @@ -0,0 +1,146 @@ +from unittest.mock import patch + +from django.test import TestCase +from factories import ClusterFactory, InvitationFactory, ProjectFactory, UserFactory +from invitations.models import Invitation + + +class KubectlMock: + """Mock class to Kubectl client""" + + def get_ingress_controller_ips(self): + return "10.0.0.10", "192.168.100.10" + + def get_cluster_apiserver_ips(self): + return {} + + +class CeleryInspectMock: + """Mock class to Celery Inspect""" + + def reserved(self): + return {} + + +class RequestMock: + """Mock request object""" + + def build_absolute_uri(self, path): + return "" + + +class EmailSenderMock: + """Mock EmailSender class""" + + def send_mail(email_template, email, ctx): + return "" + + +customer_data = { + "id": "cus_P5kbR1mwSv8j4x", + "object": "customer", + "address": None, + "balance": 0, + "created": 1701207936, + "currency": None, + "default_currency": None, + "default_source": None, + "delinquent": False, + "description": None, + "discount": None, + "email": "test@datacoveslocal.com", + "invoice_prefix": "65A2AE89", + "invoice_settings": { + "custom_fields": None, + "default_payment_method": None, + "footer": None, + "rendering_options": None, + }, + "livemode": False, + "metadata": {}, + "name": "test-2", + "next_invoice_sequence": 1, + "phone": None, + "preferred_locales": [], + "shipping": None, + "tax_exempt": "none", + "test_clock": None, +} + + +session_data = { + "customer_update": {"address": "auto", "name": "auto"}, + "automatic_tax": {"enabled": "True"}, + "cancel_url": "https://datacoveslocal.com/admin/billing/cancel", + "customer": "cus_P5kbR1mwSv8j4x", + "line_items": {"0": {"quantity": "1", "price": "price_1NxZJ8LF8qmfSSrQgfUna6jl"}}, + "success_url": "https://datacoveslocal.com/admin/billing/checkout?session_id={CHECKOUT_SESSION_ID}", + "mode": "subscription", + "subscription_data": {"metadata": {"plan": "growth-monthly"}}, +} + + +class InvitationTests(TestCase): + """ + Test invitation code + """ + + @patch("lib.kubernetes.client.Kubectl", return_value=KubectlMock()) + @patch("datacoves.celery.app.control.inspect", return_value=CeleryInspectMock()) + def setUp(self, mock_inspect, mock_kubernetes) -> None: + self.cluster = ClusterFactory.create() + self.project = ProjectFactory.create() + self.account = self.project.account + self.inviter_user = UserFactory.create() + + @patch("invitations.services.EmailSender", return_value=EmailSenderMock()) + def test_send_and_accept_invite_no_prev_user(self, email_sender_mock): + invitation = InvitationFactory.create( + inviter=self.inviter_user, + account=self.account, + email="nonexisting@datacoves.com", + ) + request = RequestMock() + 
invitation.send_invitation(request) + self.assertIs(invitation.status, Invitation.STATUS_PENDING) + invitation.accept() + self.assertIs(invitation.status, Invitation.STATUS_ACCEPTED) + + @patch("invitations.services.EmailSender", return_value=EmailSenderMock()) + def test_send_and_accept_invite_with_prev_user_same_email(self, email_sender_mock): + invitation = InvitationFactory.create( + inviter=self.inviter_user, + account=self.account, + email="existing@datacoves.com", + ) + existing_user = UserFactory.create(email="existing@datacoves.com") + created_at = existing_user.created_at + self.assertIsNotNone(created_at) + request = RequestMock() + invitation.send_invitation(request) + self.assertIs(invitation.status, Invitation.STATUS_PENDING) + invitation.accept() + self.assertIs(invitation.status, Invitation.STATUS_ACCEPTED) + self.assertEqual(invitation.user.created_at, created_at) + + @patch("datacoves.celery.app.control.inspect", return_value=CeleryInspectMock()) + @patch("invitations.services.EmailSender", return_value=EmailSenderMock()) + def test_send_and_accept_invite_with_prev_user_different_email_case( + self, inspect_mock, email_sender_mock + ): + invitation = InvitationFactory.create( + inviter=self.inviter_user, + account=self.account, + email="differentcase@datacoves.com", + name="existing", + ) + existing_user = UserFactory.create(email="DifferentCase@datacoves.com") + created_at = existing_user.created_at + self.assertIsNotNone(created_at) + request = RequestMock() + invitation.send_invitation(request) + self.assertIs(invitation.status, Invitation.STATUS_PENDING) + invitation.accept() + self.assertIs(invitation.status, Invitation.STATUS_ACCEPTED) + self.assertIs(invitation.user.id, existing_user.id) + self.assertEqual(invitation.user.created_at, created_at) diff --git a/src/core/api/app/invitations/views.py b/src/core/api/app/invitations/views.py new file mode 100644 index 00000000..60526b45 --- /dev/null +++ b/src/core/api/app/invitations/views.py @@ -0,0 +1,74 @@ +from core.mixins.views import AddAccountToContextMixin +from django.conf import settings +from django.http import HttpResponse +from django.shortcuts import redirect +from django.views.generic import View +from django_filters.rest_framework import DjangoFilterBackend +from iam.permissions import ( + AccountIsNotOnTrial, + AccountIsNotSuspended, + HasResourcePermission, +) +from rest_framework import filters, generics +from rest_framework.permissions import IsAuthenticated + +from .models import Invitation +from .permissions import IsInvitationsAdminEnabled +from .serializers import InvitationSerializer, ResendInvitationSerializer + + +class AcceptInvite(View): + def get(self, request, *args, invite_key=None, **kwargs): + try: + invitation = Invitation.objects.get(key=invite_key.lower()) + except Invitation.DoesNotExist: + return redirect(settings.INVITATION_ERROR_URL) + + if invitation.key_expired(): + return redirect(settings.INVITATION_ERROR_URL) + + # If invitation was not previously accepted + if not invitation.was_accepted() and not invitation.accept(): + return redirect(settings.INVITATION_ERROR_URL) + + return redirect(settings.INVITATION_SUCCESS_REDIRECT) + + +def invite_error(request): + # TODO: Implement this view + msg = "Invalid invite link." 
+ return HttpResponse(msg) + + +class InvitationMixin: + serializer_class = InvitationSerializer + permission_classes = [ + IsAuthenticated, + HasResourcePermission, + IsInvitationsAdminEnabled, + AccountIsNotOnTrial, + AccountIsNotSuspended, + ] + + def get_queryset(self): + return Invitation.objects.valid().filter( + account__slug=self.kwargs.get("account_slug") + ) + + +class InvitationList( + InvitationMixin, AddAccountToContextMixin, generics.ListCreateAPIView +): + filter_backends = [filters.SearchFilter, DjangoFilterBackend] + search_fields = ["name", "email"] + filterset_fields = ["groups"] + + +class InvitationDetail( + InvitationMixin, AddAccountToContextMixin, generics.RetrieveDestroyAPIView +): + pass + + +class ResendInvitation(InvitationMixin, generics.UpdateAPIView): + serializer_class = ResendInvitationSerializer diff --git a/src/core/api/app/lib/__init__.py b/src/core/api/app/lib/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/lib/airflow.py b/src/core/api/app/lib/airflow.py new file mode 100644 index 00000000..66713c3f --- /dev/null +++ b/src/core/api/app/lib/airflow.py @@ -0,0 +1,520 @@ +""" +Library file for interacting with Airflow API on our environment instances. +""" + +import json +from http import HTTPStatus +from typing import Dict, List + +import requests +from django.conf import settings +from projects.models import NAMESPACE_PREFIX, Environment, ServiceCredential +from rest_framework.authtoken.models import Token + +# What are variable names that Airflow considers secret? This comes from +# https://github.com/apache/airflow/blob/main/task_sdk/src/airflow/sdk/execution_time/secrets_masker.py +# +# This list can also be altered based on configuration, but we currently do +# not do this. +DEFAULT_SENSITIVE_FIELDS = { + "access_token", + "api_key", + "apikey", + "authorization", + "passphrase", + "passwd", + "password", + "private_key", + "secret", + "token", + "keyfile_dict", + "service_account", +} + + +def is_secret_variable_name(name: str) -> bool: + """Is this variable name considered a secret?""" + + for field in DEFAULT_SENSITIVE_FIELDS: + if field in name.lower(): + return True + + return False + + +class NoSecretsManagerException(Exception): + """We have no secrets manager""" + + pass + + +class ConfigIsMissingException(Exception): + """We have no secrets manager configuration""" + + pass + + +class AirflowAPI: + def __init__(self, slug: str, api_key: str): + """Set up the API class to do an API call using 'slug' environment's + webserver and the given API key. + """ + + self.slug = slug + self.url = ( + f"http://{slug}-airflow-webserver.{NAMESPACE_PREFIX}{slug}:8080/api/v1/" + ) + self.headers = { + "Authorization": f"Bearer {api_key}", + "Content-Type": "application/json", + "Accept": "application/json", + } + + # If for_environment_service_user is used, then sometimes we need + # the token for other things as well, so we'll store the Token here. + # This is not typical behavior so it isn't on the constructor. 
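As a quick illustration of `is_secret_variable_name` defined near the top of this module, matching is a case-insensitive substring test against `DEFAULT_SENSITIVE_FIELDS`:

```python
# Behaviour of is_secret_variable_name() as defined above.
assert is_secret_variable_name("SNOWFLAKE_PASSWORD")         # contains "password"
assert is_secret_variable_name("extra__gcp__keyfile_dict")   # contains "keyfile_dict"
assert not is_secret_variable_name("warehouse_name")         # no sensitive substring
```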
+ self.token = None + self.is_secrets_backend_enabled = False + + def _handle_request_error(self, result: requests.Response) -> None: + """Handles request errors by raising a RuntimeError.""" + if not result.ok: + raise RuntimeError( + f"Got a {result.status_code} {result.request.method}:{result.request.url} to " + f"{self.slug}: {result.text}" + ) + + @classmethod + def is_api_enabled(cls, env: Environment) -> bool: + """Check if the API is enabled for the environment.""" + return env.airflow_config.get("api_enabled", False) + + @classmethod + def get_secrets_backend_enabled(cls, env: Environment) -> bool: + """Check if the API is enabled for the environment.""" + return env.airflow_config.get("secrets_backend_enabled", False) + + @classmethod + def for_environment_service_user(cls, env: Environment): + """ + This returns an AirflowAPI that is set up for a given environment's + service user, ready to go. + + Throws NoSecretsManagerException if we couldn't initialize an + AirflowAPI because there was no token for the environment. + """ + + if not cls.is_api_enabled(env=env): + raise ConfigIsMissingException("The Airflow API is not enabled.") + + from iam.serializers import MyTokenObtainPairSerializer + + token = Token.objects.filter( + key=env.airflow_config.get("service_account_token") + ).first() + + # This probably means we aren't using secrets manager + if token is None: + raise NoSecretsManagerException("Credentials not found.") + + api = cls( + env.slug, + str(MyTokenObtainPairSerializer.get_token(token.user).access_token), + ) + + api.token = token + api.is_secrets_backend_enabled = cls.get_secrets_backend_enabled(env=env) + + return api + + def _get_all(self, endpoint: str) -> list: + """ + Private method that handles the common code for fetching all of + the paginated variables / connections. endpoint should be + 'connections' or 'variables' + """ + + offset = 0 + results = [] + + while True: + response = requests.get( + f"{self.url}{endpoint}", + headers=self.headers, + params={ + "limit": 100, + "offset": offset, + }, + ) + + if response.status_code != 200: + self._handle_request_error(response) + + result = response.json() + + results += result[endpoint] + + if len(results) < result["total_entries"]: + offset += 100 + else: + break + + return results + + def get_connections(self): + """ + Returns a list of connections as a dictionary + + This is the structure of the dictionaries returned: + + https://airflow.apache.org/docs/apache-airflow/stable/stable-rest-api-ref.html#operation/get_connections + + This fetches *everything* in a list. 
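A hedged sketch of how the connection listing tends to be used; `api` is assumed to be an `AirflowAPI` instance obtained via `for_environment_service_user`:

```python
# Sketch only. get_connections()/get_variables() page through the REST API
# 100 records at a time via _get_all() above.
existing_ids = {c["connection_id"] for c in api.get_connections()}
if "airbyte_connection" not in existing_ids:
    print("airbyte connection has not been pushed to this environment yet")
```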
+ """ + + return self._get_all("connections") + + def get_connection(self, connection_id: str): + """Returns the connection as a dict, or a None if unset + + This is the structure of the dictionary returned: + + https://airflow.apache.org/docs/apache-airflow/stable/stable-rest-api-ref.html#operation/get_variable + """ + + result = requests.get( + f"{self.url}connections/{connection_id}", headers=self.headers + ) + + if result.status_code == 200: + return result.json() + + if result.status_code == 404: + return None + + # This is an error + raise RuntimeError( + f"Got a {result.status_code} from airflow {self.slug}: {result.text}" + ) + + def _prepare_connection_payload( + self, + connection_id: str, + conn_type: str, + description: str | None = None, + host: str | None = None, + port: int | None = None, + login: str | None = None, + password: str | None = None, + schema: str | None = None, + extra: str | None = None, + ) -> dict: + ret = { + "connection_id": connection_id, + "conn_type": conn_type, + } + + if description: + ret["description"] = description + + if host: + ret["host"] = host + + if port: + ret["port"] = port + + if login: + ret["login"] = login + + if password: + ret["password"] = password + + if schema: + ret["schema"] = schema + + if extra: + ret["extra"] = extra + + return ret + + def create_connection( + self, + connection_id: str, + conn_type: str, + description: str | None = None, + host: str | None = None, + port: int | None = None, + login: str | None = None, + password: str | None = None, + schema: str | None = None, + extra: str | None = None, + ): + try: + result = requests.post( + f"{self.url}connections", + headers=self.headers, + json=self._prepare_connection_payload( + connection_id, + conn_type, + description, + host, + port, + login, + password, + schema, + extra, + ), + ) + self._handle_request_error(result) + + except requests.RequestException as e: + raise RuntimeError(f"Request failed: {e}") + + def update_connection( + self, + connection_id: str, + conn_type: str, + description: str | None = None, + host: str | None = None, + port: int | None = None, + login: str | None = None, + password: str | None = None, + schema: str | None = None, + extra: str | None = None, + ): + try: + result = requests.patch( + f"{self.url}connections/{connection_id}", + headers=self.headers, + json=self._prepare_connection_payload( + connection_id, + conn_type, + description, + host, + port, + login, + password, + schema, + extra, + ), + ) + self._handle_request_error(result) + + except requests.RequestException as e: + raise RuntimeError(f"Request failed: {e}") + + def create_or_update_connection(self, *args, **kwargs): + """This creates 'connection_id' if it doesn't exist, or updates it if + it does. See create_connection / update_connection for the + paramaters this can take. + """ + + existing = self.get_connection( + args[0] if len(args) > 0 else kwargs["connection_id"] + ) + + if existing is None: + self.create_connection(*args, **kwargs) + else: + self.update_connection(*args, **kwargs) + + def get_variables(self): + """ + Returns a list of variables as a dictionary + + This is the structure of the dictionaries returned: + + https://airflow.apache.org/docs/apache-airflow/stable/stable-rest-api-ref.html#operation/get_variables + + This fetches *everything* in a list. 
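A hedged sketch of the variable helpers defined just below; `env` is assumed to be an `Environment` with the Airflow API enabled, and the variable name is illustrative:

```python
# Sketch only; "dbt_target" is a placeholder variable name.
api = AirflowAPI.for_environment_service_user(env)

if api.get_variable("dbt_target") is None:        # dict when set, None when unset
    api.create_variable("dbt_target", "prod", "managed by Datacoves")

# or let the helper decide between POST and PATCH:
api.create_or_update_variable("dbt_target", "prod", "managed by Datacoves")
```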
+ """ + + return self._get_all("variables") + + def get_variable(self, key: str): + """Returns the varaible as a dict, or a None if unset + + This is the structure of the dictionary returned: + + https://airflow.apache.org/docs/apache-airflow/stable/stable-rest-api-ref.html#operation/get_variable + """ + + try: + result = requests.get(f"{self.url}variables/{key}", headers=self.headers) + if result.status_code == 404: + return None + + self._handle_request_error(result) + return result.json() + + except requests.RequestException as e: + raise RuntimeError(f"Request failed: {e}") + + def create_variable(self, key: str, value: str, description: str): + try: + result = requests.post( + f"{self.url}variables", + headers=self.headers, + json={"key": key, "description": description, "value": value}, + ) + self._handle_request_error(result) + + except requests.RequestException as e: + raise RuntimeError(f"Request failed: {e}") + + def update_variable(self, key: str, value: str, description: str): + try: + result = requests.patch( + f"{self.url}variables/{key}", + headers=self.headers, + json={"key": key, "description": description, "value": value}, + ) + self._handle_request_error(result) + + except requests.RequestException as e: + raise RuntimeError(f"Request failed: {e}") + + def create_or_update_variable(self, key: str, value: str, description: str): + """This creates 'key' if it doesn't exist, or updates it if it does""" + + existing = self.get_variable(key) + + if existing is None: + self.create_variable(key, value, description) + else: + self.update_variable(key, value, description) + + def delete_variable(self, key: str): + """Deletes a variable by key""" + + try: + result = requests.delete(f"{self.url}variables/{key}", headers=self.headers) + self._handle_request_error(result) + + except requests.RequestException as e: + raise RuntimeError(f"Request failed: {e}") + + def get_role(self, role_name: str) -> dict: + """Returns the varaible as a dict, or a None if unset + + This is the structure of the dictionary returned: + + https://airflow.apache.org/docs/apache-airflow/stable/stable-rest-api-ref.html#operation/get_role + """ + + try: + result = requests.get( + url=f"{self.url}roles/{role_name}", headers=self.headers + ) + # If the role is not found, return None + if result.status_code == HTTPStatus.NOT_FOUND: + return None + + self._handle_request_error(result) + return result.json() + + except requests.RequestException as e: + raise RuntimeError(f"Request failed: {e}") + + def create_role(self, role_name: str, actions: List[Dict[str, str]]): + try: + payload = {"name": role_name, "actions": actions} + result = requests.post( + url=f"{self.url}roles", + headers=self.headers, + json=payload, + ) + self._handle_request_error(result) + + except requests.RequestException as e: + raise RuntimeError(f"Failed to create role: {e}") + + def update_role(self, role_name: str, actions: List[Dict[str, str]]): + try: + payload = {"name": role_name, "actions": actions} + result = requests.patch( + url=f"{self.url}roles/{role_name}", headers=self.headers, json=payload + ) + self._handle_request_error(result) + + except requests.RequestException as e: + raise RuntimeError(f"Failed to update role: {e}") + + def create_or_update_role(self, role_name: str, actions: List[Dict[str, str]]): + """Creates the role if it doesn't exist, or updates it if it does.""" + existing = self.get_role(role_name=role_name) + + if existing is None: + self.create_role(role_name=role_name, actions=actions) + else: + 
self.update_role(role_name=role_name, actions=actions) + + +def push_secrets_to_airflow(env: Environment): + """We need to do this a few places, so this centralizes the logic. + Throws exception on failures. + + Your environment should ideally have select_releated("project"). + """ + + try: + api = AirflowAPI.for_environment_service_user(env) + + except NoSecretsManagerException: + return + + api.create_or_update_variable( + "datacoves-primary-secret", + api.token.key, + "Do not delete or edit - this is the secret that powers Airflow's " + "integration with the Datacoves Secret Manager.", + ) + + if env.project.secrets_secondary_backend: + api.create_or_update_variable( + "datacoves-secondary-secret", + json.dumps( + { + "backend": env.project.secrets_secondary_backend, + "backend_config": env.project.secrets_secondary_backend_config + if env.project.secrets_secondary_backend_config + else {}, + } + ), + "Do not delete or edit - this is the secret that powers Airflow's " + "integration with the Datacoves Secondary Secret Manager.", + ) + + elif api.get_variable("datacoves-secondary-secret") is not None: + # Delete the secret if we don't need it anymore. + api.delete_variable("datacoves-secondary-secret") + + # Push airbyte if we're rollin' like that. + if env.is_service_enabled("airbyte"): + api.create_or_update_connection( + connection_id="airbyte_connection", + conn_type="airbyte", + description="Automatically Added by Datacoves", + host=f"{env.slug}-airbyte-airbyte-server-svc", + port=8001, + ) + + # Push service connections into Airflow. Service must be delivery mode + # connection, for the airflow service, and validated. + for conn in env.service_credentials.filter( + delivery_mode=ServiceCredential.DELIVERY_MODE_CONNECTION, + service=settings.SERVICE_AIRFLOW, + validated_at__isnull=False, + ).select_related("connection_template", "connection_template__type"): + api.create_or_update_connection(**conn.get_airflow_connection()) + + # Push datacoves-dbt-api-secret into a variable + if "system_api_key" not in env.settings: + env.create_permissions() + + api.create_or_update_variable( + "datacoves-dbt-api-secret", + env.settings["system_api_key"], + "Do not delete or edit - this secret is used to integrate with dbt " + "API and is managed by Datacoves.", + ) diff --git a/src/core/api/app/lib/argument_parsing_utils.py b/src/core/api/app/lib/argument_parsing_utils.py new file mode 100644 index 00000000..cdeddca0 --- /dev/null +++ b/src/core/api/app/lib/argument_parsing_utils.py @@ -0,0 +1,29 @@ +# Argument parsing utils + + +def parse_release(release): + return remove_suffix(remove_prefix_dir(release, "releases/"), ".yaml") + + +def parse_cluster_domain(cluster_domain): + return remove_suffix(remove_prefix_dir(cluster_domain, "config/"), "/") + + +def parse_image_path(image_path): + return remove_suffix(remove_prefix_dir(image_path, "src/"), "/") + + +def remove_prefix_dir(s, prefix_dir): + return remove_prefix(remove_prefix(s, "./"), prefix_dir) + + +def remove_prefix(s, prefix): + if s.startswith(prefix): + s = s[len(prefix) :] + return s + + +def remove_suffix(s, suffix): + if s.endswith(suffix): + s = s[: -len(suffix)] + return s diff --git a/src/core/api/app/lib/channel.py b/src/core/api/app/lib/channel.py new file mode 100644 index 00000000..f733c165 --- /dev/null +++ b/src/core/api/app/lib/channel.py @@ -0,0 +1,47 @@ +import json +import logging +import threading + +from asgiref.sync import async_to_sync +from channels.layers import get_channel_layer + +from lib.utils import 
serialize_datetime + +logger = logging.getLogger(__name__) + + +def run_in_thread(consumer: str, group_name: str, message_type: str, payload: dict): + """Function to send messages to a Channels group in a separate thread.""" + channel_layer = get_channel_layer() + async_to_sync(channel_layer.group_send)( + group_name, + { + "type": consumer, + "message_type": message_type, + "message": json.dumps(payload, default=serialize_datetime), + }, + ) + + +class DjangoChannelNotify: + def __init__( + self, consumer: str, group_name: str, message_type: str, payload: dict + ): + self.consumer = consumer + self.group_name = group_name + self.message_type = message_type + self.payload = payload + self.thread = None # To reference the thread + + def __enter__(self): + self.thread = threading.Thread( + target=run_in_thread, + args=(self.consumer, self.group_name, self.message_type, self.payload), + daemon=True, # Allows the thread to close with the application + ) + self.thread.start() + return self # Can be used `as context` if necessary + + def __exit__(self, exc_type, exc_value, traceback): + if self.thread: + self.thread.join(timeout=2) # Waits a bit and does not block indefinitely diff --git a/src/core/api/app/lib/cmd.py b/src/core/api/app/lib/cmd.py new file mode 100644 index 00000000..649d6cd8 --- /dev/null +++ b/src/core/api/app/lib/cmd.py @@ -0,0 +1,89 @@ +import inspect +import os +import subprocess +import sys +from pprint import pprint + + +def main(module=sys.modules["__main__"]): + program_name, *all_args = sys.argv + command, args = None, [] + commands = module_functions(module) + + program_name = os.path.basename(program_name) + command_name = program_name.replace("-", "_") + command = commands.get(command_name) + + if not command: + if len(all_args) == 0 or all_args in (["help"], ["-h"], ["--help"]): + help(program_name, commands) + exit(1) + command_name, *args = all_args + command_name = command_name.replace("-", "_") + command = commands.get(command_name) + if command_name == "help" or args in (["-h"], ["--help"]): + if command_name == "help": + command_name = args[0] + print_command_help(command_name, commands.get(command_name)) + exit(0) + + if not command: + print(f"error: no command named {command_name}", file=sys.stderr) + exit(1) + try: + result = command(*args) + if result is not None: + pprint(result) + except subprocess.CalledProcessError as err: + exit(err.returncode) + + +def help(program_name, commands): + print(f"usage: {program_name} []\n") + print("Available commands:") + for cmd, f in commands.items(): + print_command_help(cmd, f) + + +def print_command_help(cmd, f): + if f and f.__doc__: + print(f" \033[1m{cmd}\033[0m{inspect.signature(f)}") + print(f" {f.__doc__}") + + +def module_functions(module): + return { + name: func + for name, func in inspect.getmembers(module, inspect.isfunction) + if inspect.getmodule(func) == module + } + + +def run(command, *args, check=True, env=None, cwd=None, capture_output=False): + if isinstance(command, str): + command = command.split() + kwargs = {} + if capture_output: + kwargs["stdout"] = subprocess.PIPE + kwargs["stderr"] = subprocess.PIPE + return subprocess.run(command + list(args), check=check, env=env, cwd=cwd, **kwargs) + + +def sh(command, env=None): + return subprocess.run(command, check=True, shell=True, env=env) + + +def exec(command, *args): + if isinstance(command, str): + command = command.split() + os.execvp(command[0], command + list(args)) + + +def output(command, *args, encoding="ascii"): + if isinstance(command, 
str): + command = command.split() + return subprocess.check_output(command + list(args), encoding=encoding) + + +def lines(*args, **kwargs): + return output(*args, **kwargs).splitlines() diff --git a/src/core/api/app/lib/config/__init__.py b/src/core/api/app/lib/config/__init__.py new file mode 100644 index 00000000..da29d298 --- /dev/null +++ b/src/core/api/app/lib/config/__init__.py @@ -0,0 +1,3 @@ +""" +The config module deals with datacoves config directories. +""" diff --git a/src/core/api/app/lib/config/config.py b/src/core/api/app/lib/config/config.py new file mode 100644 index 00000000..afa5ce44 --- /dev/null +++ b/src/core/api/app/lib/config/config.py @@ -0,0 +1,112 @@ +import os +from pathlib import Path + +from lib import dicts +from lib.config_files import load_file, load_yaml +from lib.docker import docker + +DATACOVES_DIR = Path.cwd() +SECRETS_DIR = None +OUTPUT_DIR = DATACOVES_DIR / ".generated" +CORE_DIR = OUTPUT_DIR / "core" +OPERATOR_DIR = OUTPUT_DIR / "operator" +PROMETHEUS_DIR = OUTPUT_DIR / "prometheus" + +GENERAL_NODE_SELECTOR = {"k8s.datacoves.com/nodegroup-kind": "general"} +VOLUMED_NODE_SELECTOR = {"k8s.datacoves.com/nodegroup-kind": "volumed"} +WORKER_NODE_SELECTOR = {"k8s.datacoves.com/workers": "enabled"} + +NODE_SELECTORS = [GENERAL_NODE_SELECTOR, VOLUMED_NODE_SELECTOR, WORKER_NODE_SELECTOR] +NODE_SELECTORS_KEYS = [list(selector.keys())[0] for selector in NODE_SELECTORS] + +cluster_domain = None +config = None +release = None + + +def load_cluster_params(params_yaml_path): + params_yaml = load_file(params_yaml_path, optional=False) + + global cluster_domain + cluster_domain = params_yaml["domain"] + + global SECRETS_DIR + SECRETS_DIR = Path(os.path.dirname(params_yaml_path)) / "secrets" + + global config + cert_manager_issuer = cluster_domain.endswith(".jnj.com") and "sectigo" + is_local = cluster_is_localhost() + config = dicts.pick_dict( + params_yaml, + { + "release": params_yaml["release"], + "docker_registry": "", + "docker_config_secret_name": "docker-config-datacovesprivate", + "application_id": "Datacoves", + "generate_docker_secret": "docker_config_secret_name" not in params_yaml, + "cert_manager_issuer": cert_manager_issuer, + "external_dns_url": None, + "run_core_api_db_in_cluster": None, + "dont_use_uwsgi": None, + "celery_worker_autoreload": None, + "local_api_volume": is_local, + "local_dbt_api_volume": is_local, + "local_dbt_api_minio": is_local, + "local_workbench_volume": is_local, + "enable_dbt_api": is_local, + "expose_dbt_api": is_local, + "flower_service": is_local, + "local_workbench_image": None, + "defines_resource_requests": not is_local, + "defines_pdb": not is_local, + "core_liveness_readiness": not is_local, + "root_tls_secret_name": None, + "wildcard_tls_secret_name": None, + "ssl_redirect": not is_local, + "block_workers": None, + "observability_stack": not is_local, + "observability_stack_resources": None, + "operator_sentry_dsn": "", + "grafana": None, + "loki_minio_password": "", + "core_postgres_config": None, + "core_minio_config": None, + "tests_runner": is_local, + "install_node_local_dns": False, + "min_replicas_worker_main": 1 if is_local else 2, + "min_replicas_worker_long": 1 if is_local else 2, + "min_replicas_api": 1, + }, + ) + + global release + release_filename = config["release"] + ".yaml" + release = load_yaml(DATACOVES_DIR / "releases" / release_filename) + + +def cluster_is_localhost(): + return cluster_domain.endswith("local.com") + + +def load_envs(cluster_domain) -> dict: + envs_path = 
Path(f"config/{cluster_domain}/environments") + if not envs_path.exists(): + return {} + envs = [ + env + for env in os.listdir(envs_path) + if Path(envs_path / env / "environment.yaml").is_file() + ] + return {env: load_file(envs_path / env / "environment.yaml") for env in envs} + + +def docker_image_name(img): + return docker.docker_image_name(img, config.get("docker_registry")) + + +def docker_image_tag(img): + return docker.docker_image_tag(img, release) + + +def docker_image_name_and_tag(img): + return docker.docker_image_name_and_tag(img, config.get("docker_registry"), release) diff --git a/src/core/api/app/lib/config/validate.py b/src/core/api/app/lib/config/validate.py new file mode 100644 index 00000000..99b5cbee --- /dev/null +++ b/src/core/api/app/lib/config/validate.py @@ -0,0 +1,40 @@ +from pathlib import Path + +from lib.config_files import load_file + + +def validate_config(path: Path): + validate_cluster_config(path) + for envdir in path.glob("environments/*"): + validate_environment_config(envdir) + + +def validate_cluster_config(path): + validate_file(path / "cluster-params.yaml", ClusterParamsConfig) + validate_file(path / "pricing.yaml", PricingConfig, optional=True) + validate_file(path / "secrets/core-api.env") + validate_file(path / "docker-config.secret.json") + + +def validate_environment_config(path): + validata_file(path / "environment.yaml", EnvironmentConfig) + validata_file(path / "airflow.yaml", AirflowConfig, optional=True) + validata_file(path / "airbyte.yaml", AirbyteConfig, optional=True) + + +def validate_file(path, schema=None, optional=False): + if path.endswith(".env"): + # TODO: Read env files data. For now, only validate it exists. + assert not optional and path.exists(), f"Missing file {path}." + return + + if optional and not path.exists(): + return + + data = load_file(path, optional=False) + if schema: + validate(data, schema) + + +def validate(data, schema): + raise NotImplemented() diff --git a/src/core/api/app/lib/config_files.py b/src/core/api/app/lib/config_files.py new file mode 100644 index 00000000..cd4ed846 --- /dev/null +++ b/src/core/api/app/lib/config_files.py @@ -0,0 +1,173 @@ +""" +Read and write config files. Most commonly used: load_file, write_yaml. 
+""" + +import base64 +import json +import re +import sys +from pathlib import Path + +import yaml + +from .dicts import deep_merge + +### Directories ### + + +def mkdir(path): + if not isinstance(path, Path): + path = Path(path) + path.mkdir(parents=True, exist_ok=True) + + +### Loading ### + + +def load_file(path, optional=True): + if isinstance(path, Path): + path = str(path) + parts = path.rsplit(".", 1) + prefix, suffix = parts if len(parts) == 2 else (parts[0], "yaml") + loaders = { + "yaml": load_yaml, + "yml": load_yaml, + "json": load_json, + "env": load_env_file, + } + loader = loaders.get(suffix, load_yaml) + + path = Path(f"{prefix}.{suffix}") + path_secret = Path(f"{prefix}.secret.{suffix}") + path_local = Path(f"{prefix}.local.{suffix}") + res = {} + if path.is_file(): + config = loader(path) + if config: + res.update(config) + elif not optional: + raise FileNotFoundError(path) + if path_secret.is_file(): + config = loader(path_secret) + if config: + res = deep_merge(config, res) + if path_local.is_file(): + config = loader(path_local) + if config: + res = deep_merge(config, res) + return res + + +def load_json(path): + with open(path, "r") as f: + return json.load(f) + + +def load_yaml(path): + with open(path, "r") as f: + return yaml.safe_load(f) + + +def load_env_file(path): + env = {} + with open(path, "r") as f: + for line in f: + line = line.strip() + if line.startswith("#") or "=" not in line: + continue + name, value = line.split("=", 1) + env[name.strip()] = value.strip() + return env + + +def load_as_base64(path): + with open(path, "rb") as f: + return str(base64.b64encode(f.read()), encoding="ascii") + + +def load_text_file(path): + with open(path, "r") as f: + return f.read() + + +### Writing ### + + +class YAMLDumper(yaml.Dumper): + def ignore_aliases(*args): + return True + + def increase_indent(self, flow=False, *args, **kwargs): + return super().increase_indent(flow=flow, indentless=False) + + +def emit_yamls(dest_dir, files): + for path, file_data in files.items(): + with open(dest_dir / path, "w+") as f: + if isinstance(file_data, str): + print(file_data, file=f) + else: + print_yamls(file_data, file=f) + + +def write_file(path, data): + with open(path, "w+") as f: + print(data, file=f) + + +def write_yaml(path, data): + with open(path, "w+") as f: + print_yamls(data, file=f) + + +def print_yamls(resources, file=sys.stdout): + if isinstance(resources, dict): + resources = [resources] + first = True + for res in resources: + if not first: + print("---", file=file) + print( + yaml.dump(res, default_flow_style=False, Dumper=YAMLDumper), + end="", + file=file, + ) + first = False + print(file=file) + + +### Update ### + + +def update_file(path, f): + assert callable(f) + contents = "" + with open(path, "r", encoding="utf-8") as file: + contents = file.read() + new_contents = f(contents) + with open(path, "w") as file: + file.write(new_contents) + + +def replace_in_file(path, pattern, replacement): + if not isinstance(pattern, re.Pattern): + pattern = re.compile(pattern, flags=re.MULTILINE) + update_file(path, lambda s: re.sub(pattern, replacement, s)) + + +def secret_value_from_json(path, key, encode=False): + with open(path, "r") as f: + data = f.read() + value = json.loads(data)[key] + return ( + str(base64.b64encode(value.encode()), encoding="ascii") if encode else value + ) + + +def secret_value_from_yaml(path, key, encode=False): + with open(path, "r") as f: + data = f.read() + value = yaml.load(data, Loader=yaml.FullLoader)[key] + return ( + 
str(base64.b64encode(value.encode()), encoding="ascii") if encode else value + ) diff --git a/src/core/api/app/lib/dicts.py b/src/core/api/app/lib/dicts.py new file mode 100644 index 00000000..b005a9b4 --- /dev/null +++ b/src/core/api/app/lib/dicts.py @@ -0,0 +1,46 @@ +from copy import deepcopy + + +def deep_merge(new_values, default_values): + """Merge new values into default values dict, overrding existing values""" + + def merge(source, destination): + for key, value in source.items(): + if isinstance(value, dict): + # get node or create one + node = destination.setdefault(key, {}) + merge(value, node) + else: + destination[key] = value + return destination + + default = deepcopy(default_values) + return merge(new_values, default) + + +def pick_fields(model_class, data): + from django.core.exceptions import FieldDoesNotExist + + d = {} + for k, v in data.items(): + try: + model_class._meta.get_field(k) + d[k] = v + except FieldDoesNotExist: + pass + return d + + +def set_in(d, path, val): + """ + Given a dict, path and value, modifies the dict and creates the needed + dicts based on path to set the value properly + """ + for i in range(len(path) - 1): + d = d.setdefault(path[i], {}) + d[path[-1]] = val + + +def pick_dict(src, subset): + """Make a dictionary with keys from subset and values from src, or subset.""" + return {k: src.get(k, default) for k, default in subset.items()} diff --git a/src/core/api/app/lib/doc_compiler.py b/src/core/api/app/lib/doc_compiler.py new file mode 100644 index 00000000..6dcbeefd --- /dev/null +++ b/src/core/api/app/lib/doc_compiler.py @@ -0,0 +1,317 @@ +""" +The purpose of this is to 'compile' a set of docsify-laid out pages into +a static HTML version. This script takes two parameters; an input and an +output. The input should be the root directory path of the docsify project. +The output will be a directory path where we will output the completed files. +""" + +import os +import re +import shutil +import xml.etree.ElementTree as etree + +import markdown +from bs4 import BeautifulSoup +from markdown.blockprocessors import BlockQuoteProcessor +from markdown.extensions import Extension +from markdown.extensions.codehilite import CodeHiliteExtension +from pygments.formatters import HtmlFormatter + +DATACOVES_DOCS_URL = "https://docs.datacoves.com" + + +class BlockQuoteWithAttributes(BlockQuoteProcessor): + """This adds the [!TIP], [!WARNING], etc. support to blockquotes""" + + # For alert types + RE_ALERTS = re.compile(r"^\[!(TIP|WARNING|NOTE|ATTENTION)\]") + + def run(self, parent: etree.Element, blocks: list[str]) -> None: + block = blocks.pop(0) + m = self.RE.search(block) + + # We need an 'alert_type' variable in here for alert support + # It can be tip, note, warning, or attention (or blank) + alert_type = "" + + if m: + before = block[: m.start()] # Lines before blockquote + + # Pass lines before blockquote in recursively for parsing first. + self.parser.parseBlocks(parent, [before]) + + # Remove `> ` from beginning of each line. 
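For context, a hedged sketch of the docsify-style alert syntax this processor recognises, run through the `DocsifyMarkdownExtension` defined later in this file:

```python
# Sketch only. The [!TIP]/[!WARNING]/[!NOTE]/[!ATTENTION] marker is stripped and
# the blockquote gets class="alert callout <type>" plus a <p class="title"> header.
html = markdown.markdown(
    "> [!TIP]\n> Run ./cli.py --help to list available commands.",
    extensions=[DocsifyMarkdownExtension()],  # defined further down in this module
)
```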
+ block = "\n".join( + [self.clean(line) for line in block[m.start() :].split("\n")] + ) + + type_match = self.RE_ALERTS.search(block) + + if type_match: + alert_type = type_match.group(1).lower() + block = block[len(type_match.group(0)) :] + + sibling = self.lastChild(parent) + + if sibling is not None and sibling.tag == "blockquote": + # Previous block was a blockquote so set that as this blocks parent + quote = sibling + + else: + # Add attributes as needed + attributes = {} + + if alert_type: + attributes["class"] = "alert callout " + alert_type + + # This is a new blockquote. Create a new parent element. + quote = etree.SubElement(parent, "blockquote", attrib=attributes) + + # Add our
<p>
title tag if we need it + if alert_type: + title = etree.SubElement( + quote, + "p", + attrib={ + "class": "title", + }, + ) + + # add our little span + span = etree.SubElement( + title, "span", attrib={"class": f"icon icon-{alert_type}"} + ) + + span.tail = alert_type[0].upper() + alert_type[1:] + + # Recursively parse block with blockquote as parent. + # change parser state so blockquotes embedded in lists use `p` tags + self.parser.state.set("blockquote") + self.parser.parseChunk(quote, block) + self.parser.state.reset() + + +class DocsifyMarkdownExtension(Extension): + """To make an extension for markdown""" + + def extendMarkdown(self, md): + md.parser.blockprocessors.register( + BlockQuoteWithAttributes(md.parser), "quote", 100 + ) + + +class DocsifyCodeCustomFormatter(HtmlFormatter): + """This makes Python Markdown's code blocks conform to the layout that + docsify uses (and thus the CSS wants)""" + + def __init__(self, lang_str="", **options): + super().__init__(**options) + + # lang_str has the value {lang_prefix}{lang} + # specified by the CodeHilite's options + self.lang_str = lang_str + + def _wrap_code(self, source): + yield 0, f'' + yield from source + yield 0, "" + + +class DocsifyTemplate: + """A basic class to handle a docsify template""" + + def __init__(self, base_dir: str, template_name: str = "index.template.html"): + """Read in a template from a given base directory and set it up + as a compiled Docsify Template + """ + + self.base_dir = base_dir + self.template_body = "" + + with open(f"{base_dir}/{template_name}", "rt") as input: + self.template_body = input.read() + + self.template_body = self.handle_injects(self.template_body) + + def load_and_parse_md(self, md_file: str) -> str: + """Load a markdown file and convert it to HTML. 
Return the HTML.""" + + with open(f"{md_file}", "rt") as input: + return markdown.markdown( + input.read(), + tab_length=2, + extensions=[ + "extra", + DocsifyMarkdownExtension(), + CodeHiliteExtension(pygments_formatter=DocsifyCodeCustomFormatter), + ], + ) + + def handle_injects(self, template: str) -> str: + """Handles @inject(filename) style tokens)""" + + next_pieces = None + pieces = template.split("@inject(", 1) + ret = pieces[0] + + while len(pieces) > 1: + next_pieces = pieces[1].split(")", 1) + + if len(next_pieces) != 2: + raise RuntimeError("Unmatched paren with an @inject tag") + + # next_pieces 0 will be the file to load, next_pieces 1 is + # the remainder of the file + + # Inject it + ret += self.load_and_parse_md(f"{self.base_dir}/{next_pieces[0]}") + + # Find our next inject + pieces = next_pieces[1].split("@inject(", 1) + ret += pieces[0] + + return ret + + def render(self, md_file: str) -> str: + """Renders a markdown file 'md_file' in the template, which replaces + the @content tag + """ + + content = self.load_and_parse_md(md_file) + content = self.template_body.replace("@content", content) + + # Turn '@datacoves.com' into links + matches = re.findall(r"\w+@datacoves.com", content) + already_replaced = set() + + for email in matches: + if email not in already_replaced: + content = content.replace( + email, f'{email}' + ) + + already_replaced.add(email) + + # Do manipulations in beautifulsoup + html = BeautifulSoup(content, "html.parser") + + # Fix any links that start with / and end in .md + for link in html.find_all("a"): + # Start things with / that need to be started with / + # + # Only alter local links -- things that start with http:// + # or https:// we will assume go to logical places and should + # not be touched. + if ( + "href" in link.attrs + and not link["href"].startswith("http://") + and not link["href"].startswith("https://") + and not link["href"].startswith("mailto:") + ): + # I guess we have an empty URL somewhere + if not len(link["href"]): + continue + + # make it start with / + if link["href"][0] != "/": + link["href"] = "/" + link["href"] + + # Split off the '#' if we have it + url_and_hash = link["href"].split("#", 1) + + # Convert .md to .html + if url_and_hash[0].endswith(".md"): + url_and_hash[0] = url_and_hash[0][:-2] + "html" + + # Make sure there's an extension if there is nothing + if url_and_hash[0][-1:] != "/" and "." not in url_and_hash[0]: + url_and_hash[0] = url_and_hash[0] + ".html" + + link["href"] = "#".join(url_and_hash) + + # Fix h1, h2, h3, h4, h5, h6 to have names + for x in range(1, 7): + for tag in html.find_all(f"h{x}"): + anchor = html.new_tag("a") + anchor["name"] = re.sub( + r"[^\w\d]+", "-", tag.text.strip().lower() + ).strip("-") + tag.insert_before(anchor) + + # Fix pre tag + for parent_div in html.find_all("div", class_="codehilite"): + inner_pre = parent_div.find("pre") + inner_code = inner_pre.find("code") + inner_pre["class"] = inner_code.attrs.get("class", []) + ["codehilite"] + inner_pre["data-lang"] = [ + inner_code.attrs.get("class", ["language-"])[0][9:] + ] + + parent_div.replace_with(inner_pre) + + return html.prettify() + + +def recursive_process_dirs( + template: DocsifyTemplate, base_in: str, base_out: str, static_collector: list +): + """For each file in directory base_in, process in the following fashion: + * Files starting with _ are ignored + * Files not ending in .md are ignored + * README.md becomes index.html + + Directories are created in base_out if needed, then entered and + iterated over. 
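Taken together with `generate_sitemap_txt` and `main` below, the compile pipeline amounts to the following; input and output paths are illustrative, and the output directory is assumed to exist:

```python
# Sketch of what main() below drives; "docs" and "build/docs" are placeholders.
template = DocsifyTemplate("docs")            # reads docs/index.template.html
statics = []
recursive_process_dirs(template, "docs", "build/docs", statics)
generate_sitemap_txt("build/docs", statics)   # writes sitemap.txt and robots.txt
```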
+ """ + + for filename in os.listdir(base_in): + fullpath_in = f"{base_in}/{filename}" + fullpath_out = f"{base_out}/{filename}" + + if os.path.isdir(fullpath_in): + if not os.path.isdir(fullpath_out): + os.makedirs(fullpath_out) + + recursive_process_dirs( + template, fullpath_in, fullpath_out, static_collector + ) + + elif filename[0] != "_" and filename[-3:] == ".md": + # If filename is README.md, the destination becomes index.html + # instead. + if filename == "README.md": + fullpath_out = f"{base_out}/index.html" + else: + # replace .md with .html + fullpath_out = fullpath_out[:-2] + "html" + + with open(fullpath_out, "wt") as output: + output.write(template.render(fullpath_in)) + static_collector.append(fullpath_out) + elif "assets" in fullpath_in: + # Copy assets over + shutil.copyfile(fullpath_in, fullpath_out) + + +def generate_sitemap_txt(out_dir: str, statics_generated: list[str]): + """ + Generate a sitemap.txt with all the static pages generated + Place it in output/robots.txt file + """ + sitemap_path = f"{out_dir}/sitemap.txt" + with open(sitemap_path, "w") as sitemap: + for static in statics_generated: + sitemap.write(f"{static.replace(out_dir, DATACOVES_DOCS_URL)}\n") + with open(f"{out_dir}/robots.txt", "w") as robots: + robots.write(f"Sitemap: {sitemap_path.replace(out_dir, DATACOVES_DOCS_URL)}") + + +def main(input_base, output_base): + # Process index.template.html into a template + template = DocsifyTemplate(input_base) + + # Iterate over directories. + statics_collector = [] + recursive_process_dirs(template, input_base, output_base, statics_collector) + + generate_sitemap_txt(output_base, statics_collector) diff --git a/src/core/api/app/lib/docker/__init__.py b/src/core/api/app/lib/docker/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/lib/docker/builder.py b/src/core/api/app/lib/docker/builder.py new file mode 100644 index 00000000..2578e8a9 --- /dev/null +++ b/src/core/api/app/lib/docker/builder.py @@ -0,0 +1,278 @@ +# fmt: off + +import io +import tarfile +import tempfile +import time +from pathlib import Path + +import docker + +from lib.config_files import load_as_base64 +from lib.kubernetes import make + +BUILD_NS = "core" +NODE_SELECTOR = {"k8s.datacoves.com/workers": "enabled"} + + +def build(image_def, docker_client=None): + """ + Build a docker image. Takes in a function that defines a docker image. The + first argument passed to image_def is a path to a temporary directory used + as the docker build context. The second argument is an instance of the + dockerfile.Dockerfile class, used to construct a Dockerfile as in the + following example: + + def my_image_definition(ctx: Path, d: docker.Dockerfile): + with open(ctx / "requirements.txt") as f: + f.write("dbt==1.0.0") + + d.FROM("alpine:latest") + d.RUN("apk ...") + d.COPY("requirements.txt", "requirements.txt") + d.RUN("pip install -r requirements.txt") + + docker.build(my_image_definition) + """ + + docker_client = docker_client or docker.from_env() + return run_image_def( + image_def, lambda context_dir: docker_client.images.build(path=str(context_dir)) + ) + + +def build_and_push_with_kaniko(cluster, image_set, image_tag, image_def, ns="core"): + # Runs the builder image_def in a temporary context_dir. + # Makes a tar file of the context_dir. + # Reads it as base64 to put it in a k8s secret to mount in the kaniko pod. 
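A hedged sketch of the `image_def` convention both build paths share; the base image and package pin are illustrative:

```python
# Sketch only; base image and pinned package are placeholders.
def my_image(ctx: Path, d: Dockerfile):
    (ctx / "requirements.txt").write_text("dbt-core==1.7.0\n")
    d.FROM("python:3.11-slim")
    d.COPY("requirements.txt", "requirements.txt")
    d.RUN("pip install --no-cache-dir -r requirements.txt")

# build(my_image) builds locally through the docker SDK, while
# build_and_push_with_kaniko(cluster, image_set, image_tag, my_image) runs the
# same definition inside a kaniko pod on the cluster.
```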
+ + kc = cluster.kubectl + + context_tar_base64 = run_image_def(image_def, tar_base64) + + build_id = make.string_hash(image_tag) + + docker_context_secret = make.secret_raw( + namespace=ns, + name=f"kaniko-docker-context-{build_id}", + data={"context.tar.gz": context_tar_base64}, + ) + kc.apply(docker_context_secret) + + docker_config_volume = { + "name": "docker-config", + "secret": { + "secretName": cluster.docker_config_secret_name, + "items": [{"key": ".dockerconfigjson", "path": "config.json"}], + }, + } + docker_context_volume = { + "name": "docker-context", + "secret": {"secretName": docker_context_secret["metadata"]["name"]}, + } + hook_volume = { + "name": "hook", + "emptyDir": {} + } + + args_sidecar = [ + "pod-status-webhook", + f"--namespace={BUILD_NS}", + f"--pod=kaniko-{build_id}", + "--container=kaniko", + f"--cluster-id={cluster.id}", + f"--image-tag={image_tag}", + f"--build-id={build_id}", + f"--url-webhook=http://core-api-svc/api/admin/profileimageset/{image_set.id}/done/", + "--token-name-env-var=DATACOVES_API_TOKEN", + "--token-header-name=Token", + ] + container_sidecar_webhook = { + "name": "sidecar-webhook", + "image": ":".join(cluster.get_image("datacovesprivate/sidecar-k8s-monitor")), + "command": ["datacoves"], + "args": args_sidecar, + "volumeMounts": [ + {"name": "hook", "mountPath": "/var/log"}, + ], + "env": [ + { + "name": "DATACOVES_API_TOKEN", + "valueFrom": { + "secretKeyRef": {"name": "api-core-service-account", "key": "token"}, + }, + } + ] + } + + container_kaniko = { + "name": "kaniko", + "image": ":".join(cluster.get_service_image("core", "gcr.io/kaniko-project/executor")), + "args": [ + "--context=tar:///context/context.tar.gz", + f"--destination={image_tag}", + # TODO: check this to decrease memory usage, see https://github.com/GoogleContainerTools/kaniko/issues/909 + "--cache=false", + "--compressed-caching=false", + "--use-new-run", + "--cleanup", + "--digest-file=/var/log/kaniko.log" + ], + "volumeMounts": [ + {"name": docker_config_volume["name"], "mountPath": "/kaniko/.docker"}, + {"name": docker_context_volume["name"], "mountPath": "/context"}, + {"name": hook_volume["name"], "mountPath": "/var/log"}, + ], + } + + if cluster.defines_resource_requests: + container_sidecar_webhook["resources"] = { + "requests": {"memory": "50Mi", "cpu": "50m"}, + "limits": {"memory": "100Mi", "cpu": "100m"}, + } + container_kaniko["resources"] = { + "requests": {"memory": "6Gi", "cpu": "200m"}, + "limits": {"memory": "10Gi", "cpu": "500m"}, + } + spec = { + "containers": [container_kaniko, container_sidecar_webhook], + # NOTE: Assuming the docker_config secret has already been created. 
+ "imagePullSecrets": [{"name": cluster.docker_config_secret_name}], + "serviceAccountName": "api", + "restartPolicy": "Never", + "volumes": [docker_config_volume, docker_context_volume, hook_volume], + "nodeSelector": NODE_SELECTOR, + } + metadata = { + "labels": { + "app": "kaniko", + "k8s.datacoves.com/kanikoBuildId": build_id, + "k8s.datacoves.com/kanikoImage": image_tag.split(":")[0].rsplit("/", 1)[-1], + "k8s.datacoves.com/kanikoProfileId": str(image_set.profile.id), + "k8s.datacoves.com/kanikoProfileName": image_set.profile.name.replace(" ", "_").lower(), + "k8s.datacoves.com/kanikoEnvSlugs": "-".join(image_set.profile.environments.values_list('slug', flat=True)), + } + } + + print(f"Building {image_tag} with kaniko") + + pod = make.pod(f"kaniko-{build_id}", ns, spec, metadata) + kc.apply(pod) + + return build_id + + +def check_kaniko_build(cluster, build_id): + kc = cluster.kubectl + pod = make.pod(f"kaniko-{build_id}", BUILD_NS, {}) + secret = make.secret_raw( + namespace=BUILD_NS, + name=f"kaniko-docker-context-{build_id}", + data={}, + ) + pod_obj = kc.read(pod, raise_404=False) + if pod_obj is None: + return "NotFound", pod_obj + phase = pod_obj.status.phase + + if phase in ("Succeeded", "Failed"): + # Wait for the pod to be deleted to register the metrics + time.sleep(60) + kc.delete(pod) + kc.delete(secret) + + return phase, pod_obj + + +def tar_base64(context_dir): + tar_path = f"{context_dir}.tar.gz" + with tarfile.open(tar_path, "w:gz") as tar: + tar.add(context_dir, arcname=".") + return load_as_base64(tar_path) + + +def run_image_def(image_def, done_callback): + with tempfile.TemporaryDirectory() as context_dir: + context_dir_path = Path(context_dir) + d = Dockerfile() + image_def(context_dir_path, d) + d.write_to(context_dir_path) + return done_callback(context_dir_path) + + +class Dockerfile: + def __init__(self): + self._file = io.BytesIO() + + def __repr__(self): + self._file.seek(0) + s = self._file.read() + self._file.seek(0, 2) + return "" + + def write_to(self, ctx_dir: Path): + with open(ctx_dir / "Dockerfile", "wb") as f: + f.write(self.file().getbuffer()) + + def file(self): + self._file.seek(0) + return self._file + + def _write(self, op, *args): + self._file.write(bytes(op, encoding="utf-8")) + for arg in args: + self._file.write(b" ") + self._file.write(bytes(arg, encoding="utf-8")) + self._file.write(b"\n") + + # https://docs.docker.com/engine/reference/builder/ + def FROM(self, *args): + self._write("FROM", *args) + + def RUN(self, *args): + self._write("RUN", *args) + + def CMD(self, *args): + self._write("CMD", *args) + + def LABEL(self, *args): + self._write("LABEL", *args) + + def EXPOSE(self, *args): + self._write("EXPOSE", *args) + + def ENV(self, *args): + self._write("ENV", *args) + + def ADD(self, *args): + self._write("ADD", *args) + + def COPY(self, *args): + self._write("COPY", *args) + + def ENTRYPOINT(self, *args): + self._write("ENTRYPOINT", *args) + + def VOLUME(self, *args): + self._write("VOLUME", *args) + + def USER(self, *args): + self._write("USER", *args) + + def WORKDIR(self, *args): + self._write("WORKDIR", *args) + + def ARG(self, *args): + self._write("ARG", *args) + + def ONBUILD(self, *args): + self._write("ONBUILD", *args) + + def STOPSIGNAL(self, *args): + self._write("STOPSIGNAL", *args) + + def HEALTHCHECK(self, *args): + self._write("HEALTHCHECK", *args) + + def SHELL(self, *args): + self._write("SHELL", *args) diff --git a/src/core/api/app/lib/docker/docker.py b/src/core/api/app/lib/docker/docker.py new file mode 100644 
index 00000000..4983b5cb --- /dev/null +++ b/src/core/api/app/lib/docker/docker.py @@ -0,0 +1,51 @@ +# Not so straightforward to interpret docker image "names" (reference to repos). +# "Repo" doesn't mean what you would think. See: +# https://docs.docker.com/glossary/#repository +# https://docs.docker.com/glossary/#registry +# https://stackoverflow.com/a/37867949 + + +def docker_image_name(img, registry): + assert not registry or looks_like_a_registry(registry) + img_reg, img = docker_image_name_to_registry_and_repo(img) + # All four combinations of (img_reg, registry) are handled. If registry and + # img_reg are both set, the resulting name will be {registry}/{img_reg}/{img}. + return docker_image_registry_and_repo_to_name( + registry, + docker_image_registry_and_repo_to_name(img_reg, img), + ) + + +def docker_image_tag(img, release): + images = release["images"].copy() + extra_images = release.get("observability_images", []) + release.get( + "core_images", [] + ) + for image in extra_images: + name, tag = image.split(":") + images[name] = tag + + return images[img] + + +def docker_image_name_and_tag(img, registry, release): + name = docker_image_name(img, registry) + tag = docker_image_tag(img, release) + return f"{name}:{tag}" + + +def looks_like_a_registry(registry): + return "." in registry or ":" in registry or registry == "localhost" + + +def docker_image_name_to_registry_and_repo(img): + img_reg, *img_path = img.split("/") + if looks_like_a_registry(img_reg): + img = "/".join(img_path) + else: + img_reg = "" + return img_reg, img + + +def docker_image_registry_and_repo_to_name(reg, repo): + return f"{reg}/{repo}" if reg else repo diff --git a/src/core/api/app/lib/gen.py b/src/core/api/app/lib/gen.py new file mode 100644 index 00000000..7c827e33 --- /dev/null +++ b/src/core/api/app/lib/gen.py @@ -0,0 +1,80 @@ +""" +Simple python metaprogramming by convention. + +You write: + {module_name}_generator.py (a program that generates {module_name}.py) + {module_name}_template.py (optional, input code to the generator) + +Running {module_name}_generator.py generates {module_name}.py. + +Generator scaffold: + +```python +from lib import gen + +def generate(): + # A dict mapping fragment names to strings that will be spliced into {module_name}_template.py + fragments = {} + + # YOUR CODE HERE. + + return fragments + +if __name__ == "__main__": + gen.render(gen.output_path(__file__), generate()) +``` + +Where the template has comments of the form `# gen: {fragment_name}` gen.render +will insert the strings in the fragments dictionary. +""" + +GENERATOR_SUFFIX = "_generator.py" +TEMPLATE_SUFFIX = "_template.py" + + +def prefix(file): + if file.endswith(GENERATOR_SUFFIX): + return file[: -len(GENERATOR_SUFFIX)] + elif file.endswith(TEMPLATE_SUFFIX): + return file[: -len(TEMPLATE_SUFFIX)] + elif file.endswith(".py"): + return file[: -len(".py")] + else: + return file + + +def generator_path(file): + return prefix(file) + GENERATOR_SUFFIX + + +def template_path(file): + return prefix(file) + TEMPLATE_SUFFIX + + +def output_path(file): + return prefix(file) + ".py" + + +def render(file, fragments): + path_out = output_path(file) + path_templ = template_path(file) + template = "" + with open(path_templ, "r") as template_file: + template = template_file.read() + if template.startswith("raise"): + template = template[template.index("\n") + 1 :] + # NOTE: quick and dirty... 
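+    # For example, with fragments == {"DISPATCH": "DISPATCH_CREATE = {...}"} (as
+    # emitted by lib/kubernetes/client_generator.py), every literal
+    # "# gen: DISPATCH" line in the template is replaced by that string, so the
+    # generated module ends up with the fragment inlined where the marker was.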
+ for fragment_name, fragment in sorted(fragments.items()): + template = template.replace(f"# gen: {fragment_name}", fragment) + with open(path_out, "w+") as output_file: + print(template, file=output_file) + + +def emitter(fragments, fragment_name): + """Returns a print like function that prints to fragments[fragment_name].""" + fragments[fragment_name] = "" + + def emit(*args, end="\n"): + fragments[fragment_name] += " ".join(args) + end + + return emit diff --git a/src/core/api/app/lib/kubernetes/__init__.py b/src/core/api/app/lib/kubernetes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/lib/kubernetes/client.py b/src/core/api/app/lib/kubernetes/client.py new file mode 100644 index 00000000..82b460f1 --- /dev/null +++ b/src/core/api/app/lib/kubernetes/client.py @@ -0,0 +1,689 @@ +import datetime +import logging +from functools import cached_property + +import kubernetes.client as kclient +import kubernetes.client.api as kapi +import kubernetes.config as kconfig +from kubernetes.client.exceptions import ApiException + +# fmt: off +DISPATCH_CREATE = { + ('v1', 'Binding'): ('CoreV1Api', 'create_namespaced_binding', True), # noqa + ('v1', 'ConfigMap'): ('CoreV1Api', 'create_namespaced_config_map', True), # noqa + ('v1', 'Endpoints'): ('CoreV1Api', 'create_namespaced_endpoints', True), # noqa + ('v1', 'Event'): ('CoreV1Api', 'create_namespaced_event', True), # noqa + ('v1', 'LimitRange'): ('CoreV1Api', 'create_namespaced_limit_range', True), # noqa + ('v1', 'Namespace'): ('CoreV1Api', 'create_namespace', False), # noqa + ('v1', 'Node'): ('CoreV1Api', 'create_node', False), # noqa + ('v1', 'PersistentVolumeClaim'): ('CoreV1Api', 'create_namespaced_persistent_volume_claim', True), # noqa + ('v1', 'PersistentVolume'): ('CoreV1Api', 'create_persistent_volume', False), # noqa + ('v1', 'Pod'): ('CoreV1Api', 'create_namespaced_pod', True), # noqa + ('v1', 'PodTemplate'): ('CoreV1Api', 'create_namespaced_pod_template', True), # noqa + ('v1', 'ReplicationController'): ('CoreV1Api', 'create_namespaced_replication_controller', True), # noqa + ('v1', 'ResourceQuota'): ('CoreV1Api', 'create_namespaced_resource_quota', True), # noqa + ('v1', 'Secret'): ('CoreV1Api', 'create_namespaced_secret', True), # noqa + ('v1', 'ServiceAccount'): ('CoreV1Api', 'create_namespaced_service_account', True), # noqa + ('v1', 'Service'): ('CoreV1Api', 'create_namespaced_service', True), # noqa + ('admissionregistration.k8s.io/v1', 'MutatingWebhookConfiguration'): ('AdmissionregistrationV1Api', 'create_mutating_webhook_configuration', False), # noqa + ('admissionregistration.k8s.io/v1', 'ValidatingWebhookConfiguration'): ('AdmissionregistrationV1Api', 'create_validating_webhook_configuration', False), # noqa + ('apiextensions.k8s.io/v1', 'CustomResourceDefinition'): ('ApiextensionsV1Api', 'create_custom_resource_definition', False), # noqa + ('apiregistration.k8s.io/v1', 'APIService'): ('ApiregistrationV1Api', 'create_api_service', False), # noqa + ('apps/v1', 'ControllerRevision'): ('AppsV1Api', 'create_namespaced_controller_revision', True), # noqa + ('apps/v1', 'DaemonSet'): ('AppsV1Api', 'create_namespaced_daemon_set', True), # noqa + ('apps/v1', 'Deployment'): ('AppsV1Api', 'create_namespaced_deployment', True), # noqa + ('apps/v1', 'ReplicaSet'): ('AppsV1Api', 'create_namespaced_replica_set', True), # noqa + ('apps/v1', 'StatefulSet'): ('AppsV1Api', 'create_namespaced_stateful_set', True), # noqa + ('authentication.k8s.io/v1', 'TokenReview'): ('AuthenticationV1Api', 
'create_token_review', False), # noqa + ('authorization.k8s.io/v1', 'LocalSubjectAccessReview'): ('AuthorizationV1Api', 'create_namespaced_local_subject_access_review', True), # noqa + ('authorization.k8s.io/v1', 'SelfSubjectAccessReview'): ('AuthorizationV1Api', 'create_self_subject_access_review', False), # noqa + ('authorization.k8s.io/v1', 'SelfSubjectRulesReview'): ('AuthorizationV1Api', 'create_self_subject_rules_review', False), # noqa + ('authorization.k8s.io/v1', 'SubjectAccessReview'): ('AuthorizationV1Api', 'create_subject_access_review', False), # noqa + ('batch/v1', 'CronJob'): ('BatchV1Api', 'create_namespaced_cron_job', True), # noqa + ('batch/v1', 'Job'): ('BatchV1Api', 'create_namespaced_job', True), # noqa + ('certificates.k8s.io/v1', 'CertificateSigningRequest'): ('CertificatesV1Api', 'create_certificate_signing_request', False), # noqa + ('coordination.k8s.io/v1', 'Lease'): ('CoordinationV1Api', 'create_namespaced_lease', True), # noqa + ('events.k8s.io/v1', 'Event'): ('EventsV1Api', 'create_namespaced_event', True), # noqa + ('networking.k8s.io/v1', 'IngressClass'): ('NetworkingV1Api', 'create_ingress_class', False), # noqa + ('networking.k8s.io/v1', 'Ingress'): ('NetworkingV1Api', 'create_namespaced_ingress', True), # noqa + ('networking.k8s.io/v1', 'NetworkPolicy'): ('NetworkingV1Api', 'create_namespaced_network_policy', True), # noqa + ('node.k8s.io/v1', 'RuntimeClass'): ('NodeV1Api', 'create_runtime_class', False), # noqa + ('policy/v1beta1', 'PodSecurityPolicy'): ('PolicyV1beta1Api', 'create_pod_security_policy', False), # noqa + ('rbac.authorization.k8s.io/v1', 'ClusterRoleBinding'): ('RbacAuthorizationV1Api', 'create_cluster_role_binding', False), # noqa + ('rbac.authorization.k8s.io/v1', 'ClusterRole'): ('RbacAuthorizationV1Api', 'create_cluster_role', False), # noqa + ('rbac.authorization.k8s.io/v1', 'RoleBinding'): ('RbacAuthorizationV1Api', 'create_namespaced_role_binding', True), # noqa + ('rbac.authorization.k8s.io/v1', 'Role'): ('RbacAuthorizationV1Api', 'create_namespaced_role', True), # noqa + ('scheduling.k8s.io/v1', 'PriorityClass'): ('SchedulingV1Api', 'create_priority_class', False), # noqa + ('storage.k8s.io/v1', 'CSIDriver'): ('StorageV1Api', 'create_csi_driver', False), # noqa + ('storage.k8s.io/v1', 'CSINode'): ('StorageV1Api', 'create_csi_node', False), # noqa + ('storage.k8s.io/v1beta1', 'CSIStorageCapacity'): ('StorageV1beta1Api', 'create_namespaced_csi_storage_capacity', True), # noqa + ('storage.k8s.io/v1', 'StorageClass'): ('StorageV1Api', 'create_storage_class', False), # noqa + ('storage.k8s.io/v1', 'VolumeAttachment'): ('StorageV1Api', 'create_volume_attachment', False), # noqa + ('datacoves.com/v1', 'Account'): ('DatacovesApi', 'create_namespaced_account', True), # noqa + ('datacoves.com/v1', 'HelmRelease'): ('DatacovesApi', 'create_namespaced_helm_release', True), # noqa + ('datacoves.com/v1', 'User'): ('DatacovesApi', 'create_namespaced_user', True), # noqa + ('datacoves.com/v1', 'Workspace'): ('DatacovesApi', 'create_namespaced_workspace', True), # noqa + ('monitoring.coreos.com/v1', 'ServiceMonitor'): ('MonitoringCoreosComV1Api', 'create_namespaced_service_monitor', True), # noqa +} +DISPATCH_READ = { + ('v1', 'ComponentStatus'): ('CoreV1Api', 'read_component_status', False), # noqa + ('v1', 'ConfigMap'): ('CoreV1Api', 'read_namespaced_config_map', True), # noqa + ('v1', 'Endpoints'): ('CoreV1Api', 'read_namespaced_endpoints', True), # noqa + ('v1', 'Event'): ('CoreV1Api', 'read_namespaced_event', True), # noqa + ('v1', 
'LimitRange'): ('CoreV1Api', 'read_namespaced_limit_range', True), # noqa + ('v1', 'Namespace'): ('CoreV1Api', 'read_namespace', False), # noqa + ('v1', 'Node'): ('CoreV1Api', 'read_node', False), # noqa + ('v1', 'PersistentVolumeClaim'): ('CoreV1Api', 'read_namespaced_persistent_volume_claim', True), # noqa + ('v1', 'PersistentVolume'): ('CoreV1Api', 'read_persistent_volume', False), # noqa + ('v1', 'Pod'): ('CoreV1Api', 'read_namespaced_pod', True), # noqa + ('v1', 'PodTemplate'): ('CoreV1Api', 'read_namespaced_pod_template', True), # noqa + ('v1', 'ReplicationController'): ('CoreV1Api', 'read_namespaced_replication_controller', True), # noqa + ('v1', 'ResourceQuota'): ('CoreV1Api', 'read_namespaced_resource_quota', True), # noqa + ('v1', 'Secret'): ('CoreV1Api', 'read_namespaced_secret', True), # noqa + ('v1', 'ServiceAccount'): ('CoreV1Api', 'read_namespaced_service_account', True), # noqa + ('v1', 'Service'): ('CoreV1Api', 'read_namespaced_service', True), # noqa + ('admissionregistration.k8s.io/v1', 'MutatingWebhookConfiguration'): ('AdmissionregistrationV1Api', 'read_mutating_webhook_configuration', False), # noqa + ('admissionregistration.k8s.io/v1', 'ValidatingWebhookConfiguration'): ('AdmissionregistrationV1Api', 'read_validating_webhook_configuration', False), # noqa + ('apiextensions.k8s.io/v1', 'CustomResourceDefinition'): ('ApiextensionsV1Api', 'read_custom_resource_definition', False), # noqa + ('apiregistration.k8s.io/v1', 'APIService'): ('ApiregistrationV1Api', 'read_api_service', False), # noqa + ('apps/v1', 'ControllerRevision'): ('AppsV1Api', 'read_namespaced_controller_revision', True), # noqa + ('apps/v1', 'DaemonSet'): ('AppsV1Api', 'read_namespaced_daemon_set', True), # noqa + ('apps/v1', 'Deployment'): ('AppsV1Api', 'read_namespaced_deployment', True), # noqa + ('apps/v1', 'ReplicaSet'): ('AppsV1Api', 'read_namespaced_replica_set', True), # noqa + ('apps/v1', 'StatefulSet'): ('AppsV1Api', 'read_namespaced_stateful_set', True), # noqa + ('batch/v1', 'CronJob'): ('BatchV1Api', 'read_namespaced_cron_job', True), # noqa + ('batch/v1', 'Job'): ('BatchV1Api', 'read_namespaced_job', True), # noqa + ('certificates.k8s.io/v1', 'CertificateSigningRequest'): ('CertificatesV1Api', 'read_certificate_signing_request', False), # noqa + ('coordination.k8s.io/v1', 'Lease'): ('CoordinationV1Api', 'read_namespaced_lease', True), # noqa + ('events.k8s.io/v1', 'Event'): ('EventsV1Api', 'read_namespaced_event', True), # noqa + ('networking.k8s.io/v1', 'IngressClass'): ('NetworkingV1Api', 'read_ingress_class', False), # noqa + ('networking.k8s.io/v1', 'Ingress'): ('NetworkingV1Api', 'read_namespaced_ingress', True), # noqa + ('networking.k8s.io/v1', 'NetworkPolicy'): ('NetworkingV1Api', 'read_namespaced_network_policy', True), # noqa + ('node.k8s.io/v1', 'RuntimeClass'): ('NodeV1Api', 'read_runtime_class', False), # noqa + ('policy/v1beta1', 'PodSecurityPolicy'): ('PolicyV1beta1Api', 'read_pod_security_policy', False), # noqa + ('rbac.authorization.k8s.io/v1', 'ClusterRoleBinding'): ('RbacAuthorizationV1Api', 'read_cluster_role_binding', False), # noqa + ('rbac.authorization.k8s.io/v1', 'ClusterRole'): ('RbacAuthorizationV1Api', 'read_cluster_role', False), # noqa + ('rbac.authorization.k8s.io/v1', 'RoleBinding'): ('RbacAuthorizationV1Api', 'read_namespaced_role_binding', True), # noqa + ('rbac.authorization.k8s.io/v1', 'Role'): ('RbacAuthorizationV1Api', 'read_namespaced_role', True), # noqa + ('scheduling.k8s.io/v1', 'PriorityClass'): ('SchedulingV1Api', 'read_priority_class', False), 
# noqa + ('storage.k8s.io/v1', 'CSIDriver'): ('StorageV1Api', 'read_csi_driver', False), # noqa + ('storage.k8s.io/v1', 'CSINode'): ('StorageV1Api', 'read_csi_node', False), # noqa + ('storage.k8s.io/v1beta1', 'CSIStorageCapacity'): ('StorageV1beta1Api', 'read_namespaced_csi_storage_capacity', True), # noqa + ('storage.k8s.io/v1', 'StorageClass'): ('StorageV1Api', 'read_storage_class', False), # noqa + ('storage.k8s.io/v1', 'VolumeAttachment'): ('StorageV1Api', 'read_volume_attachment', False), # noqa + ('datacoves.com/v1', 'Account'): ('DatacovesApi', 'read_namespaced_account', True), # noqa + ('datacoves.com/v1', 'HelmRelease'): ('DatacovesApi', 'read_namespaced_helm_release', True), # noqa + ('datacoves.com/v1', 'User'): ('DatacovesApi', 'read_namespaced_user', True), # noqa + ('datacoves.com/v1', 'Workspace'): ('DatacovesApi', 'read_namespaced_workspace', True), # noqa + ('monitoring.coreos.com/v1', 'ServiceMonitor'): ('MonitoringCoreosComV1Api', 'read_namespaced_service_monitor', True), # noqa +} +DISPATCH_REPLACE = { + ('v1', 'ConfigMap'): ('CoreV1Api', 'replace_namespaced_config_map', True), # noqa + ('v1', 'Endpoints'): ('CoreV1Api', 'replace_namespaced_endpoints', True), # noqa + ('v1', 'Event'): ('CoreV1Api', 'replace_namespaced_event', True), # noqa + ('v1', 'LimitRange'): ('CoreV1Api', 'replace_namespaced_limit_range', True), # noqa + ('v1', 'Namespace'): ('CoreV1Api', 'replace_namespace', False), # noqa + ('v1', 'Node'): ('CoreV1Api', 'replace_node', False), # noqa + ('v1', 'PersistentVolumeClaim'): ('CoreV1Api', 'replace_namespaced_persistent_volume_claim', True), # noqa + ('v1', 'PersistentVolume'): ('CoreV1Api', 'replace_persistent_volume', False), # noqa + ('v1', 'Pod'): ('CoreV1Api', 'replace_namespaced_pod', True), # noqa + ('v1', 'PodTemplate'): ('CoreV1Api', 'replace_namespaced_pod_template', True), # noqa + ('v1', 'ReplicationController'): ('CoreV1Api', 'replace_namespaced_replication_controller', True), # noqa + ('v1', 'ResourceQuota'): ('CoreV1Api', 'replace_namespaced_resource_quota', True), # noqa + ('v1', 'Secret'): ('CoreV1Api', 'replace_namespaced_secret', True), # noqa + ('v1', 'ServiceAccount'): ('CoreV1Api', 'replace_namespaced_service_account', True), # noqa + ('v1', 'Service'): ('CoreV1Api', 'replace_namespaced_service', True), # noqa + ('admissionregistration.k8s.io/v1', 'MutatingWebhookConfiguration'): ('AdmissionregistrationV1Api', 'replace_mutating_webhook_configuration', False), # noqa + ('admissionregistration.k8s.io/v1', 'ValidatingWebhookConfiguration'): ('AdmissionregistrationV1Api', 'replace_validating_webhook_configuration', False), # noqa + ('apiextensions.k8s.io/v1', 'CustomResourceDefinition'): ('ApiextensionsV1Api', 'replace_custom_resource_definition', False), # noqa + ('apiregistration.k8s.io/v1', 'APIService'): ('ApiregistrationV1Api', 'replace_api_service', False), # noqa + ('apps/v1', 'ControllerRevision'): ('AppsV1Api', 'replace_namespaced_controller_revision', True), # noqa + ('apps/v1', 'DaemonSet'): ('AppsV1Api', 'replace_namespaced_daemon_set', True), # noqa + ('apps/v1', 'Deployment'): ('AppsV1Api', 'replace_namespaced_deployment', True), # noqa + ('apps/v1', 'ReplicaSet'): ('AppsV1Api', 'replace_namespaced_replica_set', True), # noqa + ('apps/v1', 'StatefulSet'): ('AppsV1Api', 'replace_namespaced_stateful_set', True), # noqa + ('batch/v1', 'CronJob'): ('BatchV1Api', 'replace_namespaced_cron_job', True), # noqa + ('batch/v1', 'Job'): ('BatchV1Api', 'replace_namespaced_job', True), # noqa + ('certificates.k8s.io/v1', 
'CertificateSigningRequest'): ('CertificatesV1Api', 'replace_certificate_signing_request', False), # noqa + ('coordination.k8s.io/v1', 'Lease'): ('CoordinationV1Api', 'replace_namespaced_lease', True), # noqa + ('events.k8s.io/v1', 'Event'): ('EventsV1Api', 'replace_namespaced_event', True), # noqa + ('networking.k8s.io/v1', 'IngressClass'): ('NetworkingV1Api', 'replace_ingress_class', False), # noqa + ('networking.k8s.io/v1', 'Ingress'): ('NetworkingV1Api', 'replace_namespaced_ingress', True), # noqa + ('networking.k8s.io/v1', 'NetworkPolicy'): ('NetworkingV1Api', 'replace_namespaced_network_policy', True), # noqa + ('node.k8s.io/v1', 'RuntimeClass'): ('NodeV1Api', 'replace_runtime_class', False), # noqa + ('policy/v1beta1', 'PodSecurityPolicy'): ('PolicyV1beta1Api', 'replace_pod_security_policy', False), # noqa + ('rbac.authorization.k8s.io/v1', 'ClusterRoleBinding'): ('RbacAuthorizationV1Api', 'replace_cluster_role_binding', False), # noqa + ('rbac.authorization.k8s.io/v1', 'ClusterRole'): ('RbacAuthorizationV1Api', 'replace_cluster_role', False), # noqa + ('rbac.authorization.k8s.io/v1', 'RoleBinding'): ('RbacAuthorizationV1Api', 'replace_namespaced_role_binding', True), # noqa + ('rbac.authorization.k8s.io/v1', 'Role'): ('RbacAuthorizationV1Api', 'replace_namespaced_role', True), # noqa + ('scheduling.k8s.io/v1', 'PriorityClass'): ('SchedulingV1Api', 'replace_priority_class', False), # noqa + ('storage.k8s.io/v1', 'CSIDriver'): ('StorageV1Api', 'replace_csi_driver', False), # noqa + ('storage.k8s.io/v1', 'CSINode'): ('StorageV1Api', 'replace_csi_node', False), # noqa + ('storage.k8s.io/v1beta1', 'CSIStorageCapacity'): ('StorageV1beta1Api', 'replace_namespaced_csi_storage_capacity', True), # noqa + ('storage.k8s.io/v1', 'StorageClass'): ('StorageV1Api', 'replace_storage_class', False), # noqa + ('storage.k8s.io/v1', 'VolumeAttachment'): ('StorageV1Api', 'replace_volume_attachment', False), # noqa + ('datacoves.com/v1', 'Account'): ('DatacovesApi', 'replace_namespaced_account', True), # noqa + ('datacoves.com/v1', 'HelmRelease'): ('DatacovesApi', 'replace_namespaced_helm_release', True), # noqa + ('datacoves.com/v1', 'User'): ('DatacovesApi', 'replace_namespaced_user', True), # noqa + ('datacoves.com/v1', 'Workspace'): ('DatacovesApi', 'replace_namespaced_workspace', True), # noqa + ('monitoring.coreos.com/v1', 'ServiceMonitor'): ('MonitoringCoreosComV1Api', 'replace_namespaced_service_monitor', True), # noqa +} +DISPATCH_DELETE = { + ('v1', 'ConfigMap'): ('CoreV1Api', 'delete_namespaced_config_map', True), # noqa + ('v1', 'Endpoints'): ('CoreV1Api', 'delete_namespaced_endpoints', True), # noqa + ('v1', 'Event'): ('CoreV1Api', 'delete_namespaced_event', True), # noqa + ('v1', 'LimitRange'): ('CoreV1Api', 'delete_namespaced_limit_range', True), # noqa + ('v1', 'Namespace'): ('CoreV1Api', 'delete_namespace', False), # noqa + ('v1', 'Node'): ('CoreV1Api', 'delete_node', False), # noqa + ('v1', 'PersistentVolumeClaim'): ('CoreV1Api', 'delete_namespaced_persistent_volume_claim', True), # noqa + ('v1', 'PersistentVolume'): ('CoreV1Api', 'delete_persistent_volume', False), # noqa + ('v1', 'Pod'): ('CoreV1Api', 'delete_namespaced_pod', True), # noqa + ('v1', 'PodTemplate'): ('CoreV1Api', 'delete_namespaced_pod_template', True), # noqa + ('v1', 'ReplicationController'): ('CoreV1Api', 'delete_namespaced_replication_controller', True), # noqa + ('v1', 'ResourceQuota'): ('CoreV1Api', 'delete_namespaced_resource_quota', True), # noqa + ('v1', 'Secret'): ('CoreV1Api', 'delete_namespaced_secret', 
True), # noqa + ('v1', 'ServiceAccount'): ('CoreV1Api', 'delete_namespaced_service_account', True), # noqa + ('v1', 'Service'): ('CoreV1Api', 'delete_namespaced_service', True), # noqa + ('admissionregistration.k8s.io/v1', 'MutatingWebhookConfiguration'): ('AdmissionregistrationV1Api', 'delete_mutating_webhook_configuration', False), # noqa + ('admissionregistration.k8s.io/v1', 'ValidatingWebhookConfiguration'): ('AdmissionregistrationV1Api', 'delete_validating_webhook_configuration', False), # noqa + ('apiextensions.k8s.io/v1', 'CustomResourceDefinition'): ('ApiextensionsV1Api', 'delete_custom_resource_definition', False), # noqa + ('apiregistration.k8s.io/v1', 'APIService'): ('ApiregistrationV1Api', 'delete_api_service', False), # noqa + ('apps/v1', 'ControllerRevision'): ('AppsV1Api', 'delete_namespaced_controller_revision', True), # noqa + ('apps/v1', 'DaemonSet'): ('AppsV1Api', 'delete_namespaced_daemon_set', True), # noqa + ('apps/v1', 'Deployment'): ('AppsV1Api', 'delete_namespaced_deployment', True), # noqa + ('apps/v1', 'ReplicaSet'): ('AppsV1Api', 'delete_namespaced_replica_set', True), # noqa + ('apps/v1', 'StatefulSet'): ('AppsV1Api', 'delete_namespaced_stateful_set', True), # noqa + ('batch/v1', 'CronJob'): ('BatchV1Api', 'delete_namespaced_cron_job', True), # noqa + ('batch/v1', 'Job'): ('BatchV1Api', 'delete_namespaced_job', True), # noqa + ('certificates.k8s.io/v1', 'CertificateSigningRequest'): ('CertificatesV1Api', 'delete_certificate_signing_request', False), # noqa + ('coordination.k8s.io/v1', 'Lease'): ('CoordinationV1Api', 'delete_namespaced_lease', True), # noqa + ('events.k8s.io/v1', 'Event'): ('EventsV1Api', 'delete_namespaced_event', True), # noqa + ('networking.k8s.io/v1', 'IngressClass'): ('NetworkingV1Api', 'delete_ingress_class', False), # noqa + ('networking.k8s.io/v1', 'Ingress'): ('NetworkingV1Api', 'delete_namespaced_ingress', True), # noqa + ('networking.k8s.io/v1', 'NetworkPolicy'): ('NetworkingV1Api', 'delete_namespaced_network_policy', True), # noqa + ('node.k8s.io/v1', 'RuntimeClass'): ('NodeV1Api', 'delete_runtime_class', False), # noqa + ('policy/v1beta1', 'PodSecurityPolicy'): ('PolicyV1beta1Api', 'delete_pod_security_policy', False), # noqa + ('rbac.authorization.k8s.io/v1', 'ClusterRoleBinding'): ('RbacAuthorizationV1Api', 'delete_cluster_role_binding', False), # noqa + ('rbac.authorization.k8s.io/v1', 'ClusterRole'): ('RbacAuthorizationV1Api', 'delete_cluster_role', False), # noqa + ('rbac.authorization.k8s.io/v1', 'RoleBinding'): ('RbacAuthorizationV1Api', 'delete_namespaced_role_binding', True), # noqa + ('rbac.authorization.k8s.io/v1', 'Role'): ('RbacAuthorizationV1Api', 'delete_namespaced_role', True), # noqa + ('scheduling.k8s.io/v1', 'PriorityClass'): ('SchedulingV1Api', 'delete_priority_class', False), # noqa + ('storage.k8s.io/v1', 'CSIDriver'): ('StorageV1Api', 'delete_csi_driver', False), # noqa + ('storage.k8s.io/v1', 'CSINode'): ('StorageV1Api', 'delete_csi_node', False), # noqa + ('storage.k8s.io/v1beta1', 'CSIStorageCapacity'): ('StorageV1beta1Api', 'delete_namespaced_csi_storage_capacity', True), # noqa + ('storage.k8s.io/v1', 'StorageClass'): ('StorageV1Api', 'delete_storage_class', False), # noqa + ('storage.k8s.io/v1', 'VolumeAttachment'): ('StorageV1Api', 'delete_volume_attachment', False), # noqa + ('datacoves.com/v1', 'Account'): ('DatacovesApi', 'delete_namespaced_account', True), # noqa + ('datacoves.com/v1', 'HelmRelease'): ('DatacovesApi', 'delete_namespaced_helm_release', True), # noqa + ('datacoves.com/v1', 'User'): 
('DatacovesApi', 'delete_namespaced_user', True), # noqa + ('datacoves.com/v1', 'Workspace'): ('DatacovesApi', 'delete_namespaced_workspace', True), # noqa + ('monitoring.coreos.com/v1', 'ServiceMonitor'): ('MonitoringCoreosComV1Api', 'delete_namespaced_service_monitor', True), # noqa +} +# fmt: on + + +class CustomDatacovesApi(object): + def __init__(self, kc): + self.kc = kc + + def create_namespaced_account(self, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.create_namespaced_custom_object( + "datacoves.com", "v1", namespace, "accounts", res, **kwargs + ) + + def read_namespaced_account(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.get_namespaced_custom_object( + "datacoves.com", "v1", namespace, "accounts", name, **kwargs + ) + + def replace_namespaced_account(self, name, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.replace_namespaced_custom_object( + "datacoves.com", "v1", namespace, "accounts", name, res, **kwargs + ) + + def delete_namespaced_account(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.delete_namespaced_custom_object( + "datacoves.com", "v1", namespace, "accounts", name, **kwargs + ) + + def create_namespaced_helm_release(self, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.create_namespaced_custom_object( + "datacoves.com", "v1", namespace, "helmreleases", res, **kwargs + ) + + def read_namespaced_helm_release(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.get_namespaced_custom_object( + "datacoves.com", "v1", namespace, "helmreleases", name, **kwargs + ) + + def replace_namespaced_helm_release(self, name, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.replace_namespaced_custom_object( + "datacoves.com", "v1", namespace, "helmreleases", name, res, **kwargs + ) + + def delete_namespaced_helm_release(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.delete_namespaced_custom_object( + "datacoves.com", "v1", namespace, "helmreleases", name, **kwargs + ) + + def create_namespaced_user(self, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.create_namespaced_custom_object( + "datacoves.com", "v1", namespace, "users", res, **kwargs + ) + + def read_namespaced_user(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.get_namespaced_custom_object( + "datacoves.com", "v1", namespace, "users", name, **kwargs + ) + + def replace_namespaced_user(self, name, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.replace_namespaced_custom_object( + "datacoves.com", "v1", namespace, "users", name, res, **kwargs + ) + + def delete_namespaced_user(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.delete_namespaced_custom_object( + "datacoves.com", "v1", namespace, "users", name, **kwargs + ) + + def create_namespaced_workspace(self, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.create_namespaced_custom_object( + "datacoves.com", "v1", namespace, "workspaces", res, **kwargs + ) + + def read_namespaced_workspace(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.get_namespaced_custom_object( + "datacoves.com", "v1", namespace, "workspaces", name, **kwargs + ) + + def replace_namespaced_workspace(self, name, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.replace_namespaced_custom_object( + "datacoves.com", "v1", namespace, "workspaces", name, res, **kwargs + ) + + def delete_namespaced_workspace(self, name, namespace, **kwargs): + return 
self.kc.CustomObjectsApi.delete_namespaced_custom_object( + "datacoves.com", "v1", namespace, "workspaces", name, **kwargs + ) + + +class CustomMonitoringCoreosComV1Api(object): + def __init__(self, kc): + self.kc = kc + + def create_namespaced_service_monitor(self, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.create_namespaced_custom_object( + "monitoring.coreos.com", "v1", namespace, "servicemonitors", res, **kwargs + ) + + def read_namespaced_service_monitor(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.get_namespaced_custom_object( + "monitoring.coreos.com", "v1", namespace, "servicemonitors", name, **kwargs + ) + + def replace_namespaced_service_monitor(self, name, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.replace_namespaced_custom_object( + "monitoring.coreos.com", + "v1", + namespace, + "servicemonitors", + name, + res, + **kwargs, + ) + + def delete_namespaced_service_monitor(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.delete_namespaced_custom_object( + "monitoring.coreos.com", "v1", namespace, "servicemonitors", name, **kwargs + ) + + +class CustomPolicyV1beta1Api(object): + def __init__(self, kc): + self.kc = kc + + +class CustomStorageV1beta1Api(object): + def __init__(self, kc): + self.kc = kc + + def create_namespaced_csi_storage_capacity(self, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.create_namespaced_custom_object( + "storage.k8s.io", "v1beta1", namespace, "csistoragecapacitys", res, **kwargs + ) + + def read_namespaced_csi_storage_capacity(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.get_namespaced_custom_object( + "storage.k8s.io", + "v1beta1", + namespace, + "csistoragecapacitys", + name, + **kwargs, + ) + + def replace_namespaced_csi_storage_capacity(self, name, namespace, res, **kwargs): + return self.kc.CustomObjectsApi.replace_namespaced_custom_object( + "storage.k8s.io", + "v1beta1", + namespace, + "csistoragecapacitys", + name, + res, + **kwargs, + ) + + def delete_namespaced_csi_storage_capacity(self, name, namespace, **kwargs): + return self.kc.CustomObjectsApi.delete_namespaced_custom_object( + "storage.k8s.io", + "v1beta1", + namespace, + "csistoragecapacitys", + name, + **kwargs, + ) + + +logger = logging.getLogger(__name__) + + +class Kubectl: + def __init__(self, client=None, config=None, in_cluster=True): + if not client: + if not config: + if in_cluster: + config = kconfig.load_incluster_config() + else: + config = kconfig.load_kube_config() + client = kclient.ApiClient(configuration=config) + self.client = client + + def apply_resources(self, namespace, resources, log=None): + for res in resources: + ty = (res["apiVersion"], res["kind"]) + assert ty in DISPATCH_CREATE, f"unrecognized resource type: {ty}" + namespaced = DISPATCH_CREATE[ty][2] + if namespaced and res.get("metadata", {}).get("namespace") is None: + res["metadata"]["namespace"] = namespace + + for res in resources: + self.apply(res, log=log) + + def apply(self, resource, log=None): + written, created, ret_obj = self.update_or_create(resource) + + if callable(log): + api_version, kind, name = self.get_resource_metadata(resource) + namespace = self.get_resource_namespace(resource) + log(f"{'Created' if created else 'Updated'} {kind} {namespace}/{name}") + + return written, created, ret_obj + + def update_or_create(self, res, **kwargs): + written, created = False, False + obj = self.read(res, **kwargs) + if obj is None: + ret_obj = self.create(res, **kwargs) + written = True 
+ created = True + return written, created, ret_obj + + if hasattr(obj, "immutable"): + return written, created, obj + + # We have to send the resourceVersion we got. If there's a modification in + # between our GET and PUT, the PUT will fail. + resource_version = ( + obj["metadata"]["resourceVersion"] + if isinstance(obj, dict) + else obj.metadata.resource_version + ) + if isinstance(res, dict): + res["metadata"]["resourceVersion"] = resource_version + else: + res.metadata.resource_version = resource_version + + ret_obj = self.replace(res, **kwargs) + written = True + return written, created, ret_obj + + def create(self, res, **kwargs): + api_version, kind, _ = self.get_resource_metadata(res) + api_class, f, namespaced = DISPATCH_CREATE[(api_version, kind)] + api = getattr(self, api_class) + fn = getattr(api, f) + namespace = self.get_resource_namespace(res) if namespaced else None + return fn(namespace, res, **kwargs) if namespace else fn(res, **kwargs) + + def read(self, res, raise_404=False, **kwargs): + api_version, kind, name = self.get_resource_metadata(res) + api_class, f, namespaced = DISPATCH_READ[(api_version, kind)] + api = getattr(self, api_class) + fn = getattr(api, f) + try: + namespace = self.get_resource_namespace(res) if namespaced else None + return fn(name, namespace, **kwargs) if namespace else fn(name, **kwargs) + except kclient.exceptions.ApiException as e: + if raise_404 or e.status != 404: + raise e + return None + + def replace(self, res, **kwargs): + api_version, kind, name = self.get_resource_metadata(res) + api_class, f, namespaced = DISPATCH_REPLACE[(api_version, kind)] + api = getattr(self, api_class) + fn = getattr(api, f) + namespace = self.get_resource_namespace(res) if namespaced else None + return ( + fn(name, namespace, res, **kwargs) if namespace else fn(name, res, **kwargs) + ) + + def delete(self, res, raise_404=False, **kwargs): + api_version, kind, name = self.get_resource_metadata(res) + api_class, f, namespaced = DISPATCH_DELETE[(api_version, kind)] + api = getattr(self, api_class) + fn = getattr(api, f) + try: + namespace = self.get_resource_namespace(res) if namespaced else None + return fn(name, namespace, **kwargs) if namespace else fn(name, **kwargs) + except kclient.exceptions.ApiException as e: + if raise_404 or e.status != 404: + raise e + + def get_resource_namespace(self, res) -> str: + return ( + res["metadata"]["namespace"] + if isinstance(res, dict) + else res.metadata.namespace + ) + + def get_resource_metadata(self, res) -> tuple: + if isinstance(res, dict): + api_version = res["apiVersion"] + kind = res["kind"] + name = res["metadata"]["name"] + else: + api_version = res.api_version + kind = res.kind + name = res.metadata.name + + if api_version is None and isinstance(res, kclient.V1NetworkPolicy): + api_version = "networking.k8s.io/v1" + kind = "NetworkPolicy" + + return api_version, kind, name + + def get_cluster_apiserver_ips(self) -> dict: + try: + endpoints = self.CoreV1Api.read_namespaced_endpoints( + namespace="default", name="kubernetes" + ) + ips = [] + ports = [] + for subsets in endpoints.subsets: + for address in subsets.addresses: + ips.append(address.ip) + + ports_filtered = filter( + lambda item: item.name == "https", subsets.ports + ) + ports_aux = list(map(lambda item: item.port, ports_filtered)) + if ports_aux and ports_aux[0] not in ports: + ports.append(ports_aux[0]) + + return {"ips": ips, "ports": ports} + + except Exception as e: + logger.error("Cluster api server: %s", e.__str__()) + return {} + + def 
get_ingress_controller_ips(self): + service = self.CoreV1Api.read_namespaced_service( + "ingress-nginx-controller", "ingress-nginx" + ) + internal_ip = service.spec.cluster_ip + external_ip = None + if service.spec.external_i_ps: + external_ip = service.spec.external_i_ps[0] + if not external_ip and service.status.load_balancer.ingress: + external_ip = service.status.load_balancer.ingress[0].ip + return internal_ip, external_ip + + def k8s_convert_selector_to_label_string(self, selector_dict: dict) -> str: + if not isinstance(selector_dict, dict) or len(selector_dict) != 1: + raise ValueError("Expected a dictionary with a single key-value pair.") + + return ",".join([f"{k}={v}" for k, v in selector_dict.items()]) + + def get_nodes_by_selector(self, selector: dict[str, str]): + return self.CoreV1Api.list_node( + label_selector=self.k8s_convert_selector_to_label_string(selector) + ) + + def delete_namespace(self, namespace): + return self.CoreV1Api.delete_namespace(namespace) + + def read_namespace(self, namespace): + return self.CoreV1Api.read_namespace(namespace) + + def restart_deployment(self, deployment, namespace): + try: + now = datetime.datetime.now(datetime.UTC) + now = str(now.isoformat("T") + "Z") + body = { + "spec": { + "template": { + "metadata": { + "annotations": {"kubectl.kubernetes.io/restartedAt": now} + } + } + } + } + self.AppsV1Api.patch_namespaced_deployment( + deployment, namespace, body, pretty="true" + ) + except ApiException as e: + if e.status != 404: + raise + + def deployment_status_from_conditions(self, conditions): + available = False + progressing = False + last_condition = None + if conditions is not None: + for condition in conditions: + if condition.type == "Available" and condition.status == "True": + available = True + elif condition.type == "Progressing" and condition.status == "True": + progressing = True + last_condition = condition + + return { + "available": available, + "progressing": progressing, + "condition": last_condition, + } + + @cached_property + def AdmissionregistrationV1Api(self): + return kapi.AdmissionregistrationV1Api(api_client=self.client) + + @cached_property + def ApiextensionsV1Api(self): + return kapi.ApiextensionsV1Api(api_client=self.client) + + @cached_property + def ApiregistrationV1Api(self): + return kapi.ApiregistrationV1Api(api_client=self.client) + + @cached_property + def AppsV1Api(self): + return kapi.AppsV1Api(api_client=self.client) + + @cached_property + def AuthenticationV1Api(self): + return kapi.AuthenticationV1Api(api_client=self.client) + + @cached_property + def AuthorizationV1Api(self): + return kapi.AuthorizationV1Api(api_client=self.client) + + @cached_property + def BatchV1Api(self): + return kapi.BatchV1Api(api_client=self.client) + + @cached_property + def CertificatesV1Api(self): + return kapi.CertificatesV1Api(api_client=self.client) + + @cached_property + def CoordinationV1Api(self): + return kapi.CoordinationV1Api(api_client=self.client) + + @cached_property + def CoreV1Api(self): + return kapi.CoreV1Api(api_client=self.client) + + @cached_property + def CustomObjectsApi(self): + return kapi.CustomObjectsApi(api_client=self.client) + + @cached_property + def EventsV1Api(self): + return kapi.EventsV1Api(api_client=self.client) + + @cached_property + def NetworkingV1Api(self): + return kapi.NetworkingV1Api(api_client=self.client) + + @cached_property + def NodeV1Api(self): + return kapi.NodeV1Api(api_client=self.client) + + @cached_property + def RbacAuthorizationV1Api(self): + return 
kapi.RbacAuthorizationV1Api(api_client=self.client) + + @cached_property + def SchedulingV1Api(self): + return kapi.SchedulingV1Api(api_client=self.client) + + @cached_property + def StorageV1Api(self): + return kapi.StorageV1Api(api_client=self.client) + + @cached_property + def DatacovesApi(self): + return CustomDatacovesApi(kc=self) + + @cached_property + def MonitoringCoreosComV1Api(self): + return CustomMonitoringCoreosComV1Api(kc=self) + + @cached_property + def PolicyV1beta1Api(self): + return CustomPolicyV1beta1Api(kc=self) + + @cached_property + def StorageV1beta1Api(self): + return CustomStorageV1beta1Api(kc=self) diff --git a/src/core/api/app/lib/kubernetes/client_generator.py b/src/core/api/app/lib/kubernetes/client_generator.py new file mode 100644 index 00000000..30c9edb6 --- /dev/null +++ b/src/core/api/app/lib/kubernetes/client_generator.py @@ -0,0 +1,206 @@ +# fmt: off + +# Run with `python -m lib.kubernetes.client_generator`. + +# Got started on this with the code in kubernetes.utils.create_from_yaml_single_item. +# That's the way the kubernetes client exposes to make api calls from objects in +# a generic way, from plain json, yaml, plain dicts. + +# It has to undo the transformations their code generator does to make different +# classes and functions for each kind of resource. A complete waste of +# everybody's time. Python is a dynamic language, nothing wrong with dicts... +# Still, we want to go through their wrappers, in case they do validation, +# etc., so we have to undo the specialization, going from apiVersion and kinds +# to the appropriate functions in the api. We could have used +# create_from_yaml_single_item, but it does string wrangling with regexes to +# find the functions at runtime :S. We do that messiness here and generate a +# table to dispatch (hopefully) faster and safer at runtime. + +import re +from collections import defaultdict + +from kubernetes import client as kclient +from kubernetes import config + +from lib import gen + +config.load_kube_config() +k8s_client = kclient.ApiClient() + + +# From `kubectl api-resources`. 
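+# Each (apiVersion, kind, namespaced) triple below becomes one entry per dispatch
+# table (where the corresponding client method exists); e.g.
+# ("apps/v1", "Deployment", True) resolves to
+# ("AppsV1Api", "create_namespaced_deployment", True) in DISPATCH_CREATE.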
+API_RESOURCES = [ + ("v1", "Binding", True), + ("v1", "ComponentStatus", False), + ("v1", "ConfigMap", True), + ("v1", "Endpoints", True), + ("v1", "Event", True), + ("v1", "LimitRange", True), + ("v1", "Namespace", False), + ("v1", "Node", False), + ("v1", "PersistentVolumeClaim", True), + ("v1", "PersistentVolume", False), + ("v1", "Pod", True), + ("v1", "PodTemplate", True), + ("v1", "ReplicationController", True), + ("v1", "ResourceQuota", True), + ("v1", "Secret", True), + ("v1", "ServiceAccount", True), + ("v1", "Service", True), + ("admissionregistration.k8s.io/v1", "MutatingWebhookConfiguration", False), + ("admissionregistration.k8s.io/v1", "ValidatingWebhookConfiguration", False), + ("apiextensions.k8s.io/v1", "CustomResourceDefinition", False), + ("apiregistration.k8s.io/v1", "APIService", False), + ("apps/v1", "ControllerRevision", True), + ("apps/v1", "DaemonSet", True), + ("apps/v1", "Deployment", True), + ("apps/v1", "ReplicaSet", True), + ("apps/v1", "StatefulSet", True), + ("authentication.k8s.io/v1", "TokenReview", False), + ("authorization.k8s.io/v1", "LocalSubjectAccessReview", True), + ("authorization.k8s.io/v1", "SelfSubjectAccessReview", False), + ("authorization.k8s.io/v1", "SelfSubjectRulesReview", False), + ("authorization.k8s.io/v1", "SubjectAccessReview", False), + ("batch/v1", "CronJob", True), + ("batch/v1", "Job", True), + ("certificates.k8s.io/v1", "CertificateSigningRequest", False), + ("coordination.k8s.io/v1", "Lease", True), + ("events.k8s.io/v1", "Event", True), + ("networking.k8s.io/v1", "IngressClass", False), + ("networking.k8s.io/v1", "Ingress", True), + ("networking.k8s.io/v1", "NetworkPolicy", True), + ("node.k8s.io/v1", "RuntimeClass", False), + ("policy/v1beta1", "PodSecurityPolicy", False), + ("rbac.authorization.k8s.io/v1", "ClusterRoleBinding", False), + ("rbac.authorization.k8s.io/v1", "ClusterRole", False), + ("rbac.authorization.k8s.io/v1", "RoleBinding", True), + ("rbac.authorization.k8s.io/v1", "Role", True), + ("scheduling.k8s.io/v1", "PriorityClass", False), + ("storage.k8s.io/v1", "CSIDriver", False), + ("storage.k8s.io/v1", "CSINode", False), + ("storage.k8s.io/v1beta1", "CSIStorageCapacity", True), + ("storage.k8s.io/v1", "StorageClass", False), + ("storage.k8s.io/v1", "VolumeAttachment", False), + ("datacoves.com/v1", "Account", True), + ("datacoves.com/v1", "HelmRelease", True), + ("datacoves.com/v1", "User", True), + ("datacoves.com/v1", "Workspace", True), + ('monitoring.coreos.com/v1', 'ServiceMonitor', True), +] + + +API_METHODS = ["create", "read", "replace", "delete"] # TODO: "patch", "read" + + +UPPER_FOLLOWED_BY_LOWER_RE = re.compile("(.)([A-Z][a-z]+)") +LOWER_OR_NUM_FOLLOWED_BY_UPPER_RE = re.compile("([a-z0-9])([A-Z])") + + +def gen_api_class_name(api_version, kind): + # Stolen from kubernetes.utils.create_from_yaml_single_item + group, _, version = api_version.partition("/") + if version == "": + version = group + group = "core" + if group == "datacoves.com": + return "DatacovesApi" + group = "".join(group.rsplit(".k8s.io", 1)) + group = "".join(word.capitalize() for word in group.split(".")) + return "{0}{1}Api".format(group, version.capitalize()) + + +def generate(): # noqa: C901 + fragments = {} + api_class_names = set() + custom_api_class_names = set() + custom_resources = defaultdict(set) + dispatch = defaultdict(dict) + + for api_version, kind, namespaced in API_RESOURCES: + api_class_name = gen_api_class_name(api_version, kind) + api_class = getattr(kclient, api_class_name, None) + 
api_class_names.add(api_class_name if api_class else "CustomObjectsApi") + api_kind = UPPER_FOLLOWED_BY_LOWER_RE.sub(r"\1_\2", kind) + api_kind = LOWER_OR_NUM_FOLLOWED_BY_UPPER_RE.sub(r"\1_\2", api_kind).lower() + + if not api_class: + custom_api_class_names.add(api_class_name) + custom_resources[api_class_name].add((api_version, kind)) + + for method in API_METHODS: + dispatch_m = dispatch[method] + pre = f"{method}_namespaced_" if namespaced else f"{method}_" + f = f"{pre}{api_kind}" + if api_class and not hasattr(api_class, f): + # print(f"function not found: {api_class_name}().{f}") + continue + dispatch_m[(api_version, kind)] = (api_class_name, f, namespaced) + + emit = gen.emitter(fragments, "DISPATCH") + emit("# fmt: off") + for method in API_METHODS: + emit(f"DISPATCH_{method.upper()} =", "{") + for k, v in dispatch[method].items(): + emit(f" {k}: {v}, # noqa") + emit("}") + emit("# fmt: on") + + emit = gen.emitter(fragments, "API_PROPERTIES") + for api_class_name in sorted(api_class_names): + emit(f"") + emit(f" @cached_property") + emit(f" def {api_class_name}(self):") + emit(f" return kapi.{api_class_name}(api_client=self.client)") + for api_class_name in sorted(custom_api_class_names): + emit(f"") + emit(f" @cached_property") + emit(f" def {api_class_name}(self):") + emit(f" return Custom{api_class_name}(kc=self)") + + emit = gen.emitter(fragments, "CUSTOM_API_CLASSES") + for api_class_name in sorted(custom_api_class_names): + emit(f"") + emit(f"class Custom{api_class_name}(object):") + emit(f" def __init__(self, kc):") + emit(f" self.kc = kc") + for (api_version, kind) in sorted(custom_resources[api_class_name]): + ty = (api_version, kind) + _, m_create, namespaced = dispatch["create"][ty] + _, m_read, _ = dispatch["read"][ty] + _, m_replace, _ = dispatch["replace"][ty] + _, m_delete, _ = dispatch["delete"][ty] + + group, version = api_version.split("/") + group, version = repr(group), repr(version) + plural = repr(f"{kind.lower()}s") + + if not namespaced: + continue # TODO: Custom non-namespaced. We don't need it for now. 
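+            # For example, ("datacoves.com/v1", "Account") emits methods such as
+            # create_namespaced_account(...) that delegate to
+            # CustomObjectsApi.create_namespaced_custom_object("datacoves.com", "v1",
+            # namespace, "accounts", ...), matching the generated client.py above.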
+ + emit(f"") + emit(f" def {m_create}(self, namespace, res, **kwargs):") + emit(f" return self.kc.CustomObjectsApi.create_namespaced_custom_object(") + emit(f" {group}, {version}, namespace, {plural}, res, **kwargs)") + emit(f"") + emit(f" def {m_read}(self, name, namespace, **kwargs):") + emit(f" return self.kc.CustomObjectsApi.get_namespaced_custom_object(") + emit(f" {group}, {version}, namespace, {plural}, name, **kwargs)") + emit(f"") + emit(f" def {m_replace}(self, name, namespace, res, **kwargs):") + emit(f" return self.kc.CustomObjectsApi.replace_namespaced_custom_object(") + emit(f" {group}, {version}, namespace, {plural}, name, res, **kwargs)") + emit(f"") + emit(f" def {m_delete}(self, name, namespace, **kwargs):") + emit(f" return self.kc.CustomObjectsApi.delete_namespaced_custom_object(") + emit(f" {group}, {version}, namespace, {plural}, name, **kwargs)") + + # debug + # print(fragments["DISPATCH"]) + # print(fragments["API_PROPERTIES"]) + # print(fragments["CUSTOM_API_CLASSES"]) + + return fragments + + +if __name__ == "__main__": + gen.render(gen.output_path(__file__), generate()) diff --git a/src/core/api/app/lib/kubernetes/client_template.py b/src/core/api/app/lib/kubernetes/client_template.py new file mode 100644 index 00000000..40f62dd0 --- /dev/null +++ b/src/core/api/app/lib/kubernetes/client_template.py @@ -0,0 +1,237 @@ +raise Exception("Attemted to run template file.") +import datetime +import logging +from functools import cached_property + +import kubernetes.client as kclient +import kubernetes.client.api as kapi +import kubernetes.config as kconfig +from kubernetes.client.exceptions import ApiException + +# gen: DISPATCH + +# gen: CUSTOM_API_CLASSES + + +logger = logging.getLogger(__name__) + + +class Kubectl: + def __init__(self, client=None, config=None, in_cluster=True): + if not client: + if not config: + if in_cluster: + config = kconfig.load_incluster_config() + else: + config = kconfig.load_kube_config() + client = kclient.ApiClient(configuration=config) + self.client = client + + def apply_resources(self, namespace, resources, log=None): + for res in resources: + ty = (res["apiVersion"], res["kind"]) + assert ty in DISPATCH_CREATE, f"unrecognized resource type: {ty}" + namespaced = DISPATCH_CREATE[ty][2] + if namespaced and res.get("metadata", {}).get("namespace") is None: + res["metadata"]["namespace"] = namespace + + for res in resources: + self.apply(res, log=log) + + def apply(self, resource, log=None): + written, created, ret_obj = self.update_or_create(resource) + + if callable(log): + api_version, kind, name = self.get_resource_metadata(resource) + namespace = self.get_resource_namespace(resource) + log(f"{'Created' if created else 'Updated'} {kind} {namespace}/{name}") + + return written, created, ret_obj + + def update_or_create(self, res, **kwargs): + written, created = False, False + obj = self.read(res, **kwargs) + if obj is None: + ret_obj = self.create(res, **kwargs) + written = True + created = True + return written, created, ret_obj + + if hasattr(obj, "immutable"): + return written, created, obj + + # We have to send the resourceVersion we got. If there's a modification in + # between our GET and PUT, the PUT will fail. 
+ resource_version = ( + obj["metadata"]["resourceVersion"] + if isinstance(obj, dict) + else obj.metadata.resource_version + ) + if isinstance(res, dict): + res["metadata"]["resourceVersion"] = resource_version + else: + res.metadata.resource_version = resource_version + + ret_obj = self.replace(res, **kwargs) + written = True + return written, created, ret_obj + + def create(self, res, **kwargs): + api_version, kind, _ = self.get_resource_metadata(res) + api_class, f, namespaced = DISPATCH_CREATE[(api_version, kind)] + api = getattr(self, api_class) + fn = getattr(api, f) + namespace = self.get_resource_namespace(res) if namespaced else None + return fn(namespace, res, **kwargs) if namespace else fn(res, **kwargs) + + def read(self, res, raise_404=False, **kwargs): + api_version, kind, name = self.get_resource_metadata(res) + api_class, f, namespaced = DISPATCH_READ[(api_version, kind)] + api = getattr(self, api_class) + fn = getattr(api, f) + try: + namespace = self.get_resource_namespace(res) if namespaced else None + return fn(name, namespace, **kwargs) if namespace else fn(name, **kwargs) + except kclient.exceptions.ApiException as e: + if raise_404 or e.status != 404: + raise e + return None + + def replace(self, res, **kwargs): + api_version, kind, name = self.get_resource_metadata(res) + api_class, f, namespaced = DISPATCH_REPLACE[(api_version, kind)] + api = getattr(self, api_class) + fn = getattr(api, f) + namespace = self.get_resource_namespace(res) if namespaced else None + return ( + fn(name, namespace, res, **kwargs) if namespace else fn(name, res, **kwargs) + ) + + def delete(self, res, raise_404=False, **kwargs): + api_version, kind, name = self.get_resource_metadata(res) + api_class, f, namespaced = DISPATCH_DELETE[(api_version, kind)] + api = getattr(self, api_class) + fn = getattr(api, f) + try: + namespace = self.get_resource_namespace(res) if namespaced else None + return fn(name, namespace, **kwargs) if namespace else fn(name, **kwargs) + except kclient.exceptions.ApiException as e: + if raise_404 or e.status != 404: + raise e + + def get_resource_namespace(self, res) -> str: + return ( + res["metadata"]["namespace"] + if isinstance(res, dict) + else res.metadata.namespace + ) + + def get_resource_metadata(self, res) -> tuple: + if isinstance(res, dict): + api_version = res["apiVersion"] + kind = res["kind"] + name = res["metadata"]["name"] + else: + api_version = res.api_version + kind = res.kind + name = res.metadata.name + + if api_version is None and isinstance(res, kclient.V1NetworkPolicy): + api_version = "networking.k8s.io/v1" + kind = "NetworkPolicy" + + return api_version, kind, name + + def get_cluster_apiserver_ips(self) -> dict: + try: + endpoints = self.CoreV1Api.read_namespaced_endpoints( + namespace="default", name="kubernetes" + ) + ips = [] + ports = [] + for subsets in endpoints.subsets: + for address in subsets.addresses: + ips.append(address.ip) + + ports_filtered = filter( + lambda item: item.name == "https", subsets.ports + ) + ports_aux = list(map(lambda item: item.port, ports_filtered)) + if ports_aux and ports_aux[0] not in ports: + ports.append(ports_aux[0]) + + return {"ips": ips, "ports": ports} + + except Exception as e: + logger.error("Cluster api server: %s", e.__str__()) + return {} + + def get_ingress_controller_ips(self): + service = self.CoreV1Api.read_namespaced_service( + "ingress-nginx-controller", "ingress-nginx" + ) + internal_ip = service.spec.cluster_ip + external_ip = None + if service.spec.external_i_ps: + external_ip = 
service.spec.external_i_ps[0] + if not external_ip and service.status.load_balancer.ingress: + external_ip = service.status.load_balancer.ingress[0].ip + return internal_ip, external_ip + + def k8s_convert_selector_to_label_string(self, selector_dict: dict) -> str: + if not isinstance(selector_dict, dict) or len(selector_dict) != 1: + raise ValueError("Expected a dictionary with a single key-value pair.") + + return ",".join([f"{k}={v}" for k, v in selector_dict.items()]) + + def get_nodes_by_selector(self, selector: dict[str, str]): + return self.CoreV1Api.list_node( + label_selector=self.k8s_convert_selector_to_label_string(selector) + ) + + def delete_namespace(self, namespace): + return self.CoreV1Api.delete_namespace(namespace) + + def read_namespace(self, namespace): + return self.CoreV1Api.read_namespace(namespace) + + def restart_deployment(self, deployment, namespace): + try: + now = datetime.datetime.now(datetime.UTC) + now = str(now.isoformat("T") + "Z") + body = { + "spec": { + "template": { + "metadata": { + "annotations": {"kubectl.kubernetes.io/restartedAt": now} + } + } + } + } + self.AppsV1Api.patch_namespaced_deployment( + deployment, namespace, body, pretty="true" + ) + except ApiException as e: + if e.status != 404: + raise + + def deployment_status_from_conditions(self, conditions): + available = False + progressing = False + last_condition = None + if conditions is not None: + for condition in conditions: + if condition.type == "Available" and condition.status == "True": + available = True + elif condition.type == "Progressing" and condition.status == "True": + progressing = True + last_condition = condition + + return { + "available": available, + "progressing": progressing, + "condition": last_condition, + } + + +# gen: API_PROPERTIES diff --git a/src/core/api/app/lib/kubernetes/k8s_utils.py b/src/core/api/app/lib/kubernetes/k8s_utils.py new file mode 100644 index 00000000..a666965a --- /dev/null +++ b/src/core/api/app/lib/kubernetes/k8s_utils.py @@ -0,0 +1,404 @@ +import copy +import re +from enum import Enum + +from kubernetes.client.models import V1ConfigMap, V1Deployment, V1Pod, V1Secret + +from lib.kubernetes import client as k8s_client + + +class KubeUnitsMemory(Enum): + Mi = "Mi" + Ki = "Ki" + Gi = "Gi" + + +def k8s_extract_numerical_value_and_units(s: str) -> tuple: + """ + Extracts numerical characters (including decimal points) and units from a string. + + Args: + - s (str): Input string. + + Returns: + - Tuple[Union[int, float], str]: Tuple containing the numerical value and the unit. + """ + + # Extracting digits, the decimal point, and non-digits for units + numerical_only = "".join([char for char in s if char.isdigit() or char == "."]) + units = "".join([char for char in s if not char.isdigit() and char != "."]) + + # Check for multiple decimal points or empty string + if numerical_only.count(".") > 1 or not numerical_only: + raise ValueError(f"The extracted numerical value is not valid: {s}") + + # Convert the numerical part to float or int based on the presence of a decimal point + numerical_value = ( + float(numerical_only) if "." in numerical_only else int(numerical_only) + ) + + return numerical_value, units + + +def k8s_convert_to_mebibytes(memory: int, units: KubeUnitsMemory) -> int: + """ + Convert memory value to Mebibytes (Mi) based on the provided units. + + Args: + - memory (int): Memory value. + - units (KubeUnitsMemory): Units of the memory (either 'Mi', 'Ki' or 'Gi'). + + Returns: + - int: Memory value in Mi. 
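    Example (illustrative, doctest-style):
        >>> k8s_convert_to_mebibytes(2048, KubeUnitsMemory.Ki)
        2
        >>> k8s_convert_to_mebibytes(2, KubeUnitsMemory.Gi)
        2048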
+ """ + + if units == KubeUnitsMemory.Mi: + return memory + elif units == KubeUnitsMemory.Ki: + return memory // 1024 # Convert Kibibytes to Mebibytes + elif units == KubeUnitsMemory.Gi: + return int(memory * 1024) # Convert Gibibytes to Mebibytes + else: + raise Exception( + f"Unexpected memory units: {units.value} (expected 'Mi', 'Ki' or 'Gi')" + ) + + +def k8s_convert_to_cpu(cpu: float, is_milli_cpu=True) -> float: + """ + Convert cpu value to CPU based on the provided units. + + Args: + - cpu (float): CPU value. + - is_milli_cpu (bool): Milli cpu + + Returns: + - float: CPU value. + """ + + if is_milli_cpu: + return cpu / 1000 + else: + return cpu + + +def k8s_resources_combine(resources: dict, custom_resources: dict) -> dict: + resources_new = copy.deepcopy(custom_resources) + + _k8s_resources_combine( + resources=resources, + custom_resources=resources_new, + res_name="requests", + ) + + _k8s_resources_combine( + resources=resources, + custom_resources=resources_new, + res_name="limits", + ) + + return resources_new + + +def _k8s_resources_combine(resources: dict, custom_resources: dict, res_name: str): + if res_name in custom_resources: + if "memory" in custom_resources[res_name]: + memory, memory_units = k8s_extract_numerical_value_and_units( + resources[res_name]["memory"] + ) + memory = k8s_convert_to_mebibytes(memory, KubeUnitsMemory(memory_units)) + + custom_memory, custom_memory_units = k8s_extract_numerical_value_and_units( + custom_resources[res_name]["memory"] + ) + custom_memory = k8s_convert_to_mebibytes( + custom_memory, KubeUnitsMemory(custom_memory_units) + ) + + if custom_memory < memory: + custom_resources[res_name]["memory"] = f"{memory}Mi" + + if "cpu" in custom_resources[res_name]: + cpu, cpu_units = k8s_extract_numerical_value_and_units( + resources[res_name]["cpu"] + ) + cpu = k8s_convert_to_cpu(cpu, cpu_units == "m") + + custom_cpu, custom_cpu_units = k8s_extract_numerical_value_and_units( + resources[res_name]["cpu"] + ) + custom_cpu = k8s_convert_to_cpu(custom_cpu, custom_cpu_units == "m") + + if custom_cpu < cpu: + custom_resources[res_name]["cpu"] = cpu + + +def gen_cron_job( + name: str, + namespace: str, + schedule: str, + image: str, + command: list, + image_pull_secret: str, + envs: dict = {}, + volumes: dict = None, + **kwargs, +) -> dict: + cron_job = { + "apiVersion": "batch/v1", + "kind": "CronJob", + "metadata": { + "name": name, + "namespace": namespace, + "labels": kwargs.get("labels", {}), + }, + "spec": { + "schedule": schedule, + "successfulJobsHistoryLimit": 3, + "jobTemplate": { + "spec": { + "template": { + "spec": { + "containers": [ + { + "name": name, + "image": image, + "imagePullPolicy": "IfNotPresent", + "command": command, + "env": envs, + "resources": { + "requests": {"cpu": "50m", "memory": "100Mi"}, + "limits": {"cpu": "200m", "memory": "300Mi"}, + }, + } + ], + "restartPolicy": "OnFailure", + "imagePullSecrets": [{"name": image_pull_secret}], + } + } + } + }, + }, + } + + if volumes: + vol = cron_job["spec"]["jobTemplate"]["spec"]["template"]["spec"] + vol["containers"][0]["volumeMounts"] = volumes["volume_mounts"] + vol["volumes"] = volumes["volumes"] + + return cron_job + + +def _k8s_resources_update_properties(k8s_resources: list): + for item in k8s_resources: + if isinstance(item, V1Secret): + kind = item.kind or "Secret" + elif isinstance(item, V1ConfigMap): + kind = item.kind or "ConfigMap" + + item.api_version = item.api_version or "v1" + item.kind = kind + + +def _get_all_secrets_and_config_map_by_namespace(namespace: 
str) -> list: + kubectl = k8s_client.Kubectl() + config_maps = kubectl.CoreV1Api.list_namespaced_config_map( + namespace=namespace + ).items + secrets = kubectl.CoreV1Api.list_namespaced_secret(namespace=namespace).items + k8s_resources = secrets + config_maps + _k8s_resources_update_properties(k8s_resources=k8s_resources) + return k8s_resources + + +def get_all_secrets_and_config_maps_resources_noused_by_namespace( + namespace: str, +) -> list: + "Returns the list of resources to be removed on kubernetes" + kubectl = k8s_client.Kubectl() + secrets_used = [] + config_maps_used = [] + + # All resources + all_secrets_and_config_maps = _get_all_secrets_and_config_map_by_namespace( + namespace=namespace + ) + + # Deployments + items = kubectl.AppsV1Api.list_namespaced_deployment(namespace=namespace).items + secrets_used.extend(_get_all_secret_and_config_map_resources_used(items)["secrets"]) + config_maps_used.extend( + _get_all_secret_and_config_map_resources_used(items)["config_maps"] + ) + + # Pods + items = kubectl.CoreV1Api.list_namespaced_pod(namespace=namespace).items + secrets_used.extend(_get_all_secret_and_config_map_resources_used(items)["secrets"]) + config_maps_used.extend( + _get_all_secret_and_config_map_resources_used(items)["config_maps"] + ) + + # Workspace + if namespace.startswith("dcw-"): + datacoves_workspace_res = ( + _get_all_datacoves_workspace_secret_and_config_map_used_by_namespace( + namespace=namespace, name=namespace.replace("dcw-", "") + ) + ) + secrets_used.extend(datacoves_workspace_res["secrets"]) + config_maps_used.extend(datacoves_workspace_res["config_maps"]) + + # Extra resources + extra_res_used = _get_duplicate_resources(k8s_resources=all_secrets_and_config_maps) + secrets_used.extend(extra_res_used["secrets"]) + config_maps_used.extend(extra_res_used["config_maps"]) + + res_used = set(secrets_used + config_maps_used) + res_noused = [] + for item in all_secrets_and_config_maps: + res_name = item.metadata.name + if res_name not in res_used: + res_noused.append(item) + + return res_noused + + +def _get_duplicate_resources(k8s_resources: str) -> dict: + """Returns K8s resources used group them by name prefix. + + Args: + k8s_resources (list): K8s resources + + Returns: + list: K8s resources used. 
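    Example (illustrative names): ConfigMaps "airflow-values-4ec519c0d0" and
    "airflow-values-9f8e7d6c5b" share the prefix "airflow-values"; only the most
    recently created of the two is reported as used.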
+ """ + res_names = list(map(lambda x: x.metadata.name, k8s_resources)) + res_name_prefix = [] + for item_name in res_names: + # Group resources like: airflow-values-4ec519c0d0 + _hash = re.findall(r"(-[a-zA-Z\d]{10})$", item_name) + name = item_name.replace(_hash.pop(), "") if _hash else None + + if name is None: + # Group resources like: sh.helm.release.v1.dev123-airflow.v1 + _helm_version = re.findall(r"(sh.helm.release.v1.*.v)(\d+)", item_name) + name = _helm_version.pop()[0] if _helm_version else item_name + + res_name_prefix.append(name) + + # Get item used from duplicates + secrets_used = [] + config_maps_used = [] + for name_prefix in set(res_name_prefix): + resources = list( + filter(lambda x: x.metadata.name.startswith(name_prefix), k8s_resources) + ) + + if resources: + resources.sort(key=lambda r: r.metadata.creation_timestamp, reverse=True) + res = resources[0] + if isinstance(res, V1Secret): + secrets_used.append(res.metadata.name) + elif isinstance(res, V1ConfigMap): + config_maps_used.append(res.metadata.name) + + return {"secrets": secrets_used, "config_maps": config_maps_used} + + +def _get_all_datacoves_workspace_secret_and_config_map_used_by_namespace( + namespace: str, name: str +) -> dict: + res_used = {"secrets": [], "config_maps": []} + kubectl = k8s_client.Kubectl() + workspace = kubectl.DatacovesApi.read_namespaced_workspace( + name=name, namespace=namespace + ) + for res in workspace["spec"]["configs"].values(): + # There is not way to know if it is a secret or configmap + res_used["secrets"].append(res) + res_used["config_maps"].append(res) + + code_server_secret_name_by_user = list( + map(lambda x: x["secretName"], workspace["spec"]["users"]) + ) + res_used["secrets"].extend(code_server_secret_name_by_user) + return res_used + + +def _get_resources_from_containers(containers) -> tuple: + resources_secret = [] + resources_config_map = [] + + for container in containers: + if container.env: + for env_var in container.env: + if env_var.value_from and env_var.value_from.secret_key_ref: + resources_secret.append(env_var.value_from.secret_key_ref.name) + elif env_var.value_from and env_var.value_from.config_map_key_ref: + resources_config_map.append( + env_var.value_from.config_map_key_ref.name + ) + + return resources_secret, resources_config_map + + +def _get_resources_from_volumnes(volumes) -> tuple: + resources_secret = [] + resources_config_map = [] + + if volumes: + for volume in volumes: + if volume.secret: + resources_secret.append(volume.secret.secret_name) + + if volume.config_map: + resources_config_map.append(volume.config_map.name) + + return resources_secret, resources_config_map + + +def _get_resources_from_image_pull_secret(image_pull_secrets) -> list: + resources_secret = [] + + if image_pull_secrets: + for secret in image_pull_secrets: + resources_secret.append(secret.name) + + return resources_secret + + +def _get_all_secret_and_config_map_resources_used(k8s_resources: any) -> dict: + "Returns the list of resources to be removed on kubernetes" + resources_secret = [] + resources_config_map = [] + + for item in k8s_resources: + if isinstance(item, V1Deployment): + containers = item.spec.template.spec.containers + volumes = item.spec.template.spec.volumes + image_pull_secrets = item.spec.template.spec.image_pull_secrets + elif isinstance(item, V1Pod): + containers = item.spec.containers + volumes = item.spec.volumes + image_pull_secrets = item.spec.image_pull_secrets + else: + containers = [] + volumes = [] + + # Containers + secrets, config_map = 
_get_resources_from_containers(containers=containers) + resources_secret.extend(secrets) + resources_config_map.extend(config_map) + + # Volumnes + secrets, config_map = _get_resources_from_volumnes(volumes=volumes) + resources_secret.extend(secrets) + resources_config_map.extend(config_map) + + # PullSecrets + secrets = _get_resources_from_image_pull_secret( + image_pull_secrets=image_pull_secrets + ) + resources_secret.extend(secrets) + + return {"secrets": resources_secret, "config_maps": resources_config_map} diff --git a/src/core/api/app/lib/kubernetes/make.py b/src/core/api/app/lib/kubernetes/make.py new file mode 100644 index 00000000..093899b3 --- /dev/null +++ b/src/core/api/app/lib/kubernetes/make.py @@ -0,0 +1,343 @@ +# This module has k8s resource construction utilities. Functions return the +# resource definitions as python dicts. + +import base64 +import hashlib +import json + +import yaml + +import lib.kubernetes.client as k8s_client + +yaml.Dumper.ignore_aliases = lambda *args: True + + +### Namespaces ### + + +def namespace(name): + return { + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": name, + }, + } + + +def namespace_quota(name="compute-resources", namespace=None, spec=None): + """ + spec should be a dict with the following structure + { + "hard": { + "requests.cpu": "1", + "requests.memory": "1Gi", + "limits.cpu": "2", + "limits.memory": "2Gi", + "requests.nvidia.com/gpu": 4 + } + } + + """ + if spec is None: + spec = {} + return { + "apiVersion": "v1", + "kind": "ResourceQuota", + "metadata": {"name": name, "namespace": namespace}, + "spec": spec, + } + + +def namespace_limit_range(name="compute-resources-limit-range", namespace=None): + """ + spec should be a dict with the following structure + { + "hard": { + "requests.cpu": "1", + "requests.memory": "1Gi", + "limits.cpu": "2", + "limits.memory": "2Gi", + "requests.nvidia.com/gpu": 4 + } + } + + """ + + spec = { + "limits": [ + { + "default": {"cpu": "500m", "memory": "512m"}, + "defaultRequest": {"cpu": "100m", "memory": "128m"}, + "type": "Container", + } + ] + } + + return { + "apiVersion": "v1", + "kind": "LimitRange", + "metadata": {"name": name, "namespace": namespace}, + "spec": spec, + } + + +### Pods ### + + +def pod(name, namespace, spec, metadata={}): + meta = { + "name": name, + "namespace": namespace, + } + meta.update(metadata) + return { + "apiVersion": "v1", + "kind": "Pod", + "metadata": meta, + "spec": spec, + } + + +### Secrets and ConfigMaps ### + + +def hashed_config_map(name, data, **kwargs): + return hash_config(config_map(name, data, **kwargs)) + + +def hashed_secret(name, data, **kwargs): + return hash_config(secret(name, data, **kwargs)) + + +def hashed_json_config_map(name, data, **kwargs): + return hash_config(json_config_map(name, data, **kwargs)) + + +def hashed_yaml_file_secret(name, filename, data, **kwargs): + return hash_config(yaml_file_secret(name, filename, data, **kwargs)) + + +# Hashes for config names (mirrors operator/controller/utils/hash.go) + +HASH_LEN = 10 + + +def config_hash(config): + """Computes the hash of the data of a Secret or ConfigMap.""" + h = hashlib.sha256() + for k, v in sorted(config.get("data", {}).items()): + h.update(to_bytes(k)) + h.update(to_bytes(v)) + for k, v in sorted(config.get("stringData", {}).items()): + h.update(to_bytes(k)) + h.update(to_bytes(v)) + for k, v in sorted(config.get("binaryData", {}).items()): + h.update(to_bytes(k)) + h.update(to_bytes(v)) + return h.hexdigest()[:HASH_LEN] + + +def string_hash(s): + h = 
hashlib.sha256() + h.update(to_bytes(s)) + return h.hexdigest()[:HASH_LEN] + + +def to_bytes(v): + return bytes(v, "utf8") if isinstance(v, str) else v + + +def hash_config(config): + """Turns a ConfigMap or Secret into one named with a hash of its data.""" + config["metadata"]["name"] += "-" + config_hash(config) + config["immutable"] = True + return config + + +def res_config_hashes(res): + # Collect all the names of hashed secrets and config maps. It simplifies the + # rest of the code to do it systematically here. A bit hackish because we + # use heuristics to tell them apart from other resources. + config_hashes = {} + for r in res: + if r["kind"] in ("Secret", "ConfigMap") and r.get("immutable"): + name = r["metadata"]["name"] + l = HASH_LEN + 1 + if len(name) < l or name[-l] != "-": + continue + config_hashes[name[:-l]] = name + + return config_hashes + + +def docker_config_secret(name, data, **kwargs): + data = {".dockerconfigjson": json.dumps(data)} + return secret(name, data, type="kubernetes.io/dockerconfigjson", **kwargs) + + +def yaml_file_secret(name, filename, data, **kwargs): + data = {filename: yaml.dump(data)} + return secret(name, data, **kwargs) + + +def secret(name, data, type="Opaque", **kwargs): + data_raw = { + k: str(base64.b64encode(bytes(str(v), "utf-8")), encoding="utf-8") + for k, v in data.items() + } + return secret_raw(name, data_raw, type, **kwargs) + + +def secret_raw(name, data, type="Opaque", namespace=None, **kwargs): + return { + "apiVersion": "v1", + "kind": "Secret", + "type": type, + "metadata": { + "name": name, + "namespace": namespace, + "annotations": kwargs.get("annotations", {}), + "labels": kwargs.get("labels", {}), + }, + "data": data, + } + + +def json_config_map(name, data, **kwargs): + return config_map(name, {k: json.dumps(v) for k, v in data.items()}, **kwargs) + + +def config_map(name, data, **kwargs): + return { + "apiVersion": "v1", + "kind": "ConfigMap", + "metadata": { + "name": name, + "annotations": kwargs.get("annotations", {}), + "labels": kwargs.get("labels", {}), + }, + "data": data, + } + + +def configmap_namespaced(name: str, namespace: str, data, **kwargs): + cm = config_map(name, data, kwargs=kwargs) + cm["metadata"]["namespace"] = namespace + return cm + + +### Volumes ### + + +def persistent_volume_claim( + name, storage_class, size, volume_name, access_modes=["ReadWriteMany"] +): + return { + "kind": "PersistentVolumeClaim", + "apiVersion": "v1", + "metadata": {"name": name}, + "spec": { + "storageClassName": storage_class, + "accessModes": access_modes, + "resources": {"requests": {"storage": size}}, + "volumeName": volume_name, + }, + } + + +def efs_storage_class(): + return { + "kind": "StorageClass", + "apiVersion": "storage.k8s.io/v1", + "metadata": {"name": "efs"}, + "provisioner": "efs.csi.aws.com", + "mountOptions": ["tls"], + } + + +def efs_persistent_volume(name, volume_handle, size): + return { + "apiVersion": "v1", + "kind": "PersistentVolume", + "metadata": {"name": name}, + "spec": { + # The capacity is ignored by the driver, but it needs to be + # specified anyway. A PVC requesting more storage than this won't + # be bound to to this PV. + "capacity": {"storage": size}, + "volumeMode": "Filesystem", + "accessModes": ["ReadWriteMany"], + "persistentVolumeReclaimPolicy": "Retain", + "storageClassName": "efs", + "csi": { + "driver": "efs.csi.aws.com", + # Volume handle is the EFS file system id. 
+ "volumeHandle": volume_handle, + "volumeAttributes": {"encryptInTransit": "true"}, + }, + }, + } + + +def admission_webhook_crt(secret="admission-controller", namespace="core"): + kc = k8s_client.Kubectl() + secret = kc.CoreV1Api.read_namespaced_secret(secret, namespace) + return secret.data.get("webhook.crt") + + +def admission_webhook( + workspace, + namespace, + service_namespace="core", + service_name="admission-controller", + service_path="/", + service_port=80, +): + ca_bundle = admission_webhook_crt() + admission_webhook = { + "apiVersion": "admissionregistration.k8s.io/v1", + "kind": "ValidatingWebhookConfiguration", + "metadata": {"name": f"{workspace}-admission-webhook"}, + "webhooks": [ + { + "name": "admission-controller.admission-controller.svc", + "namespaceSelector": { + "matchExpressions": [ + { + "key": "k8s.datacoves.com/workspace", + "operator": "In", + "values": [workspace], + }, + { + "key": "kubernetes.io/metadata.name", + "operator": "In", + "values": [namespace], + }, + ] + }, + "rules": [ + { + "apiGroups": [""], + "apiVersions": ["v1", "v1beta1"], + "operations": ["CREATE"], + "resources": ["pods", "pods/*"], + "scope": "Namespaced", + } + ], + "clientConfig": { + "service": { + "namespace": service_namespace, + "name": service_name, + "path": service_path, + "port": service_port, + }, + "caBundle": ca_bundle, + }, + "admissionReviewVersions": ["v1"], + "sideEffects": "None", + "timeoutSeconds": 10, + } + ], + } + return admission_webhook diff --git a/src/core/api/app/lib/networking.py b/src/core/api/app/lib/networking.py new file mode 100644 index 00000000..458f4d8d --- /dev/null +++ b/src/core/api/app/lib/networking.py @@ -0,0 +1,5 @@ +import socket + + +def resolve_ip(hostname): + return socket.gethostbyname(hostname) diff --git a/src/core/api/app/lib/requirements.py b/src/core/api/app/lib/requirements.py new file mode 100644 index 00000000..89bbf891 --- /dev/null +++ b/src/core/api/app/lib/requirements.py @@ -0,0 +1,188 @@ +# Why do we need to do this ourselves? I tried to find a library that would do it +# and pypa/packaging comes close but can't tell when two requirements are incompatible. +# We'd like to do that check before trying to run pip, to tell the user right away. 
+# https://github.com/pypa/packaging/issues/598 +from math import inf, isfinite + +from packaging.markers import Marker +from packaging.requirements import Requirement +from packaging.specifiers import SpecifierSet + + +def merge_requirement_lines(a, b): + if not a and not b: + return [] + if not a: + return b + if not b: + return a + return map(str, merge_requirement_lists(map(Requirement, a), map(Requirement, b))) + + +def merge_requirement_lists(a, b): + merged = {r.name: r for r in a} + for r in b: + merged[r.name] = ( + merge_requirements(merged[r.name], r) if r.name in merged else r + ) + return merged.values() + + +def merge_requirements(a: Requirement, b: Requirement): + assert a.name == b.name, "cannot merge requirements with different names" + merged = Requirement(str(a)) + try: + merged.specifier = merge_specifier_sets(a.specifier, b.specifier) + except NotImplementedError: + merged.specifier = a.specifier & b.specifier + merged.extras = a.extras | b.extras + assert not (a.url and b.url and a.url != b.url), "cannot merge urls" + merged.url = a.url or b.url + if a.marker and b.marker: + merged.marker = Marker(f"{a.marker} and {b.marker}") + else: + merged.marker = a.marker or b.marker + return merged + + +def merge_specifier_sets(a: SpecifierSet, b: SpecifierSet): + # https://peps.python.org/pep-0440/#version-specifiers + # TODO: What are prereleases? Do we need the fallback or can we handle them too? + if any(s.prereleases for s in a) or any(s.prereleases for s in b): + raise NotImplementedError() + intervals = list(map(specifier_to_interval, a)) + list( + map(specifier_to_interval, b) + ) + if not intervals: + return a # There are no constraints, return one of the inputs, must be empty. + merged_interval = ((-inf,), (inf,)) + for interval in intervals: + merged_interval, interval = promote_intervals(merged_interval, interval) + merged_interval = interval_intersection(merged_interval, interval) + assert merged_interval, "cannot merge version specifiers" + return SpecifierSet(interval_to_specifier_str(merged_interval)) + + +def interval_to_specifier_str(interval): + a, b = interval + if a == b: + return f"=={version_tuple_to_version_str(a)}" + elif is_top(b): + return f">={version_tuple_to_version_str(a)}" + elif is_bottom(a): + return f"<{version_tuple_to_version_str(b)}" + elif not is_finite(a): + raise NotImplementedError() + elif is_finite(b): + return f">={version_tuple_to_version_str(a)},<{version_tuple_to_version_str(b)}" + else: # a is finite, b is not + b = _remove_infs(b) + if len(a) >= 2: + major, minor, *_ = a + if b == (major, minor): + return f"~={version_tuple_to_version_str(a)}" + b = next_version(b) + return f">={version_tuple_to_version_str(a)},<{version_tuple_to_version_str(b)}" + + +def _remove_infs(b): + b = list(b) + while len(b) > 0 and b[-1] == inf: + b.pop() + return tuple(b) + + +def is_finite(a): + return all(map(isfinite, a)) + + +def is_bottom(a): + return a[0] == -inf + + +def is_top(b): + return b[0] == inf + + +def version_tuple_to_version_str(v): + return ".".join([str(i) for i in v if i not in (-inf, inf)]) + + +def specifier_to_interval(s): + """Convert a version specifier to an interval [a, b), represented as a tuple.""" + try: + version = version_str_to_tuple(s.version) + except ValueError: + raise NotImplementedError() + + l = len(version) + a, b = promote_min_version((-inf,), l), promote_max_version((inf,), l) + + if s.operator == "==": + a = version + b = version + elif s.operator == ">=": + a = version + elif s.operator == ">": + a = 
next_version(version) + elif s.operator == "<": + b = version + elif s.operator == "<=": + b = next_version(version) + elif s.operator == "~=": + a = version + major, minor, *_ = version + b = promote_max_version((major, minor), len(a)) + else: + raise NotImplementedError() # unsupported operators: "!=", "===" + + assert len(a) == len(b) + return (a, b) + + +def version_str_to_tuple(version: str): + v = tuple(map(int, version.split("."))) + for i in v: + if i < 0: + raise ValueError() + return v + + +def next_version(version): + version = list(version) + version[-1] += 1 + return tuple(version) + + +def promote_min_version(a, l): + return tuple(list(a) + [-inf] * (l - len(a))) + + +def promote_max_version(b, l): + return tuple(list(b) + [inf] * (l - len(b))) + + +def promote_intervals(x, y): + l = max(len(x[0]), len(y[0])) + return promote_interval(x, l), promote_interval(y, l) + + +def promote_interval(x, l): + xa, xb = x + if len(xa) == l and len(xb) == l: + return x + return (promote_min_version(xa, l), promote_max_version(xb, l)) + + +def interval_intersection(x, y): + assert len(x) == len(y) + xa, xb = x + ya, yb = y + if xb < ya or yb < xa: + return tuple() + return (max(xa, ya), min(xb, yb)) + + +def write_requirements(dir, reqs): + with open(dir / "requirements.txt", "w+") as f: + f.write("\n".join(map(str, reqs))) diff --git a/src/core/api/app/lib/requirements_test.py b/src/core/api/app/lib/requirements_test.py new file mode 100644 index 00000000..0b2045e0 --- /dev/null +++ b/src/core/api/app/lib/requirements_test.py @@ -0,0 +1,57 @@ +# flake8: noqa: E202,E241 + +import unittest +from math import inf + +from packaging.specifiers import Specifier + +import lib.requirements as r + +# fmt: off +conversions = [ + ("==1", ((1,), (1,) ), "==1" ), + ("==1.0", ((1, 0), (1, 0) ), "==1.0" ), + ("==1.2.3", ((1, 2, 3), (1, 2, 3) ), "==1.2.3" ), + ("~=1.2.3", ((1, 2, 3), (1, 2, inf) ), "~=1.2.3" ), + ("~=1.2.3.4", ((1, 2, 3, 4), (1, 2, inf, inf)), "~=1.2.3.4" ), + (">=2.0", ((2, 0), (inf, inf) ), ">=2.0" ), + (">3.1", ((3, 2), (inf, inf) ), ">=3.2" ), + ("<4.2", ((-inf, -inf), (4, 2) ), "<4.2" ), + ("<=5.3", ((-inf, -inf), (5, 4) ), "<5.4" ), +] +# fmt: on + + +class Test(unittest.TestCase): + def setUp(self): + self.addTypeEqualityFunc(r.Requirement, self.assertRequirementEqual) + + def assertRequirementEqual(self, a: r.Requirement, b: r.Requirement, msg=None): + return self.assertEqual(str(a), str(b), msg=msg) + + def test_specifier_to_interval(self): + for s, expected, _ in conversions: + self.assertEqual(r.specifier_to_interval(Specifier(s)), expected) + + def test_interval_to_specifier_str(self): + for _, x, expected in conversions: + self.assertEqual(r.interval_to_specifier_str(x), expected) + + def test_merge_requirements(self): + should_succeed = [ + (("a == 1.0", "a==1.0"), "a==1.0"), + (("a == 1.2.3", "a==1.2"), "a==1.2.3"), + (("a ~= 1.2.3", "a==1.2"), "a~=1.2.3"), + ] + should_fail = [ + (("a == 1.0", "a==2.0"), "cannot merge version specifiers"), + (("a ~= 1.0", "a==2.0"), "cannot merge version specifiers"), + (("a < 1.0", "a > 1.0"), "cannot merge version specifiers"), + ] + for (a, b), c in should_succeed: + ra, rb, rc = r.Requirement(a), r.Requirement(b), r.Requirement(c) + self.assertEqual(r.merge_requirements(ra, rb), rc) + for (a, b), msg in should_fail: + ra, rb = r.Requirement(a), r.Requirement(b) + with self.assertRaisesRegex(Exception, msg): + r.merge_requirements(ra, rb) diff --git a/src/core/api/app/lib/slack.py b/src/core/api/app/lib/slack.py new file mode 100644 
index 00000000..3b053ff1 --- /dev/null +++ b/src/core/api/app/lib/slack.py @@ -0,0 +1,15 @@ +from django.conf import settings +from slack_sdk import WebClient + +client = None +if settings.SLACK_BOT_TOKEN: + client = WebClient(token=settings.SLACK_BOT_TOKEN) + + +def post_slack_message(channel, text, blocks, thread_ts=None): + response = None + if client: + response = client.chat_postMessage( + channel=channel, text=text, blocks=blocks, thread_ts=thread_ts + ) + return response diff --git a/src/core/api/app/lib/tools.py b/src/core/api/app/lib/tools.py new file mode 100644 index 00000000..09855f19 --- /dev/null +++ b/src/core/api/app/lib/tools.py @@ -0,0 +1,38 @@ +def parse_image_uri(image_uri): + default_registry = "docker.io" + repository, tag = image_uri.split(":") + chunks = repository.split("/") + registry = chunks[0] + if "." in registry: + # valid repository + repository = "/".join(chunks[1:]) + else: + registry = default_registry + return registry, repository, tag + + +def get_related_environment(namespace): + """Given a namespace returns the related environment.""" + from projects.models import NAMESPACE_PREFIX, Environment + + if not namespace.startswith(NAMESPACE_PREFIX): + return + slug = namespace[len(NAMESPACE_PREFIX) :] + environment: Environment = ( + Environment.objects.filter(slug=slug).select_related("cluster").first() + ) + return environment + + +def get_related_account(environment): + """Given an environment returns the related account.""" + from users.models import Account + + if environment is None: + return + account: Account = ( + Account.objects.filter(projects__environments__id=environment.id) + .select_related("plan") + .first() + ) + return account diff --git a/src/core/api/app/lib/utils.py b/src/core/api/app/lib/utils.py new file mode 100644 index 00000000..11d82fd8 --- /dev/null +++ b/src/core/api/app/lib/utils.py @@ -0,0 +1,163 @@ +""" +Utilities that don't quite fit somewhere else yet. This is useful when you come +up with a function that is generic and you don't know where to put it yet. +Don't let this file grow too big. Move things out of here once it is clearer +where to put them. +""" + +# TODO: This file shoudn't depend on django, redis, celery, etc. When running +# from cli.py or a regular ipython shell it forces us to install all those +# packages which we don't need. Make a separate module for django utils. 
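# Illustrative usage of two helpers defined further down in this module
# (the decorator arguments and lock name are made-up values):
#
#     @cache_result(timeout=600, key_prefix="node-count")
#     def node_count(cluster_id):
#         ...
#
#     with task_lock("sync-secrets-lock") as acquired:
#         if acquired:
#             ...  # only one worker holds the lock at a time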
+ +import logging +import time +from contextlib import ContextDecorator, contextmanager +from datetime import date, datetime, timedelta, timezone +from functools import reduce, wraps + +logger = logging.getLogger(__name__) + +LOCK_EXPIRE = 60 * 10 # Lock expires in 10 minutes + + +def m2m_changed_subjects_and_objects(kwargs): + if not kwargs["reverse"]: + subjects = [kwargs["instance"]] + objects = kwargs["pk_set"] + else: + subjects = kwargs["model"].objects.filter(pk__in=kwargs["pk_set"]) + objects = {kwargs["instance"].pk} + return subjects, objects + + +def date_to_datetime(d): + assert isinstance(d, date) + return datetime(d.year, d.month, d.day, tzinfo=timezone.utc) + + +def now(): + return datetime.now(timezone.utc).replace(tzinfo=timezone.utc) + + +def yesterday(): + return date_to_datetime((now() - timedelta(days=1)).date()) + + +def day_interval_until_now(day): + from django.utils import timezone as django_timezone + + t_a, t_b = day_interval(day) + now = django_timezone.now().timestamp() + return min(t_a, now), min(t_b, now) + + +def day_interval(day): + day = date_to_datetime(day) + t_a = day.timestamp() + t_b = (day + timedelta(days=1)).timestamp() + return t_a, t_b + + +@contextmanager +def task_lock(lock_id): + from django.core.cache import cache + from redis.exceptions import LockError + + timeout_at = time.monotonic() + LOCK_EXPIRE - 3 + lock = cache.lock(lock_id, timeout=LOCK_EXPIRE) + status = lock.acquire(blocking=False) + try: + yield status + finally: + if time.monotonic() < timeout_at and status: + try: + lock.release() + except LockError: + pass + + +def get_pending_tasks(task_name): + """Get pending tasks for a given task_name""" + from datacoves.celery import app + + matched_tasks = [] + inspect = app.control.inspect() + reserved = inspect.reserved() + if reserved: + tasks = reduce(lambda x, y: x + y, reserved.values()) + for task in tasks: + if task.get("type") == task_name: + matched_tasks.append(task) + return matched_tasks + + +def cancel_pending_task(task_id: str): + from datacoves.celery import app + + app.control.revoke(task_id) + + +def same_dicts_in_lists(list1, list2): + """Check if lists contain the same dictionaries in any order.""" + return len(list1) == len(list2) and all(x in list2 for x in list1) + + +# Define a custom function to serialize datetime objects +def serialize_datetime(obj): + if isinstance(obj, datetime): + return obj.isoformat() + raise TypeError("Type not serializable") + + +def force_ipv4(func): + import requests + + def wrapper(*args, **kwargs): + has_ipv6 = requests.packages.urllib3.util.connection.HAS_IPV6 + try: + requests.packages.urllib3.util.connection.HAS_IPV6 = False + return func(*args, **kwargs) + finally: + requests.packages.urllib3.util.connection.HAS_IPV6 = has_ipv6 + + return wrapper + + +class Timer(ContextDecorator): + def __init__(self, name="Process"): + self.name = name + self.start_time = None + self.end_time = None + + def __enter__(self): + self.start_time = time.perf_counter() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.end_time = time.perf_counter() + elapsed_time = self.end_time - self.start_time + logger.info(f"{self.name} took {elapsed_time:.4f} seconds") + + +def cache_result(timeout=3600, key_prefix=None): + from django.core.cache import cache + + def decorator(func): + @wraps(func) + def wrapper(*args, **kwargs): + key_elements = [key_prefix or func.__name__] + if args: + key_elements.extend(map(str, args)) + if kwargs: + key_elements.extend(f"{k}={v}" for k, v in 
sorted(kwargs.items())) + cache_key = ":".join(key_elements) + + result = cache.get(cache_key) + if result is None: + result = func(*args, **kwargs) + cache.set(cache_key, result, timeout) + return result + + return wrapper + + return decorator diff --git a/src/core/api/app/manage.py b/src/core/api/app/manage.py new file mode 100755 index 00000000..4756df47 --- /dev/null +++ b/src/core/api/app/manage.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +"""Django's command-line utility for administrative tasks.""" +import os +import sys + + +def main(): + """Run administrative tasks.""" + settings = ( + "datacoves.unit_tests_settings" if "test" in sys.argv else "datacoves.settings" + ) + os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings) + try: + from django.core.management import execute_from_command_line + except ImportError as exc: + raise ImportError( + "Couldn't import Django. Are you sure it's installed and " + "available on your PYTHONPATH environment variable? Did you " + "forget to activate a virtual environment?" + ) from exc + execute_from_command_line(sys.argv) + + +if __name__ == "__main__": + main() diff --git a/src/core/api/app/notifications/__init__.py b/src/core/api/app/notifications/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/notifications/admin.py b/src/core/api/app/notifications/admin.py new file mode 100644 index 00000000..1053f976 --- /dev/null +++ b/src/core/api/app/notifications/admin.py @@ -0,0 +1,33 @@ +from django.contrib import admin, messages +from django_object_actions import DjangoObjectActions + +from .models import AccountNotification, Notification + + +@admin.register(Notification) +class NotificationAdmin(DjangoObjectActions, admin.ModelAdmin): + list_display = ["kind", "title", "created_at"] + list_filter = ("kind",) + search_fields = ("title", "body") + + def send_to_channels(self, request, obj): + obj.send_to_channels() + messages.add_message(request, messages.INFO, "Notification sent.") + + send_to_channels.label = "Send notification" + send_to_channels.short_description = "Send current notification" + + change_actions = ("send_to_channels",) + + def get_readonly_fields(self, request, obj=None): + return [f.name for f in self.model._meta.fields] + + +@admin.register(AccountNotification) +class AccountNotificationAdmin(admin.ModelAdmin): + list_display = ["account", "kind", "title", "created_at"] + list_filter = ("account", "kind") + search_fields = ("title", "body") + + def get_readonly_fields(self, request, obj=None): + return [f.name for f in self.model._meta.fields] diff --git a/src/core/api/app/notifications/apps.py b/src/core/api/app/notifications/apps.py new file mode 100644 index 00000000..3a084766 --- /dev/null +++ b/src/core/api/app/notifications/apps.py @@ -0,0 +1,6 @@ +from django.apps import AppConfig + + +class NotificationsConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "notifications" diff --git a/src/core/api/app/notifications/migrations/0001_initial.py b/src/core/api/app/notifications/migrations/0001_initial.py new file mode 100644 index 00000000..b324d31a --- /dev/null +++ b/src/core/api/app/notifications/migrations/0001_initial.py @@ -0,0 +1,54 @@ +# Generated by Django 3.2.6 on 2022-12-29 20:17 + +import django.db.models.deletion +import notifications.models +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('users', '0011_account_notifications_enabled'), + ('projects', 
'0070_blockedpodcreationrequest'), + ('clusters', '0017_clusteralert'), + ] + + operations = [ + migrations.CreateModel( + name='Notification', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('title', models.CharField(max_length=400)), + ('body', models.TextField()), + ('email_sent_to', models.JSONField(blank=True, default=list, null=True)), + ('statuses', models.JSONField(blank=True, default=notifications.models.get_default_statuses, null=True)), + ('kind', models.CharField(choices=[('cluster', 'Cluster notification'), ('billing', 'Billing notification')], max_length=10)), + ('cluster_alert', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='clusters.clusteralert')), + ], + options={ + 'abstract': False, + }, + ), + migrations.CreateModel( + name='AccountNotification', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('title', models.CharField(max_length=400)), + ('body', models.TextField()), + ('email_sent_to', models.JSONField(blank=True, default=list, null=True)), + ('statuses', models.JSONField(blank=True, default=notifications.models.get_default_statuses, null=True)), + ('kind', models.CharField(choices=[('cluster', 'Cluster notification'), ('billing', 'Billing notification')], max_length=10)), + ('in_app_read_by', models.JSONField(blank=True, default=list, null=True)), + ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.account')), + ('cluster_alert', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='clusters.clusteralert')), + ('environment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='projects.environment')), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/src/core/api/app/notifications/migrations/0002_auto_20230120_1821.py b/src/core/api/app/notifications/migrations/0002_auto_20230120_1821.py new file mode 100644 index 00000000..dbf31078 --- /dev/null +++ b/src/core/api/app/notifications/migrations/0002_auto_20230120_1821.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.16 on 2023-01-20 18:21 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('notifications', '0001_initial'), + ] + + operations = [ + migrations.AddField( + model_name='accountnotification', + name='link', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AddField( + model_name='notification', + name='link', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/src/core/api/app/notifications/migrations/0003_auto_20230208_0834.py b/src/core/api/app/notifications/migrations/0003_auto_20230208_0834.py new file mode 100644 index 00000000..10d5971c --- /dev/null +++ b/src/core/api/app/notifications/migrations/0003_auto_20230208_0834.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.16 on 2023-02-08 08:34 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('notifications', '0002_auto_20230120_1821'), + ] + + operations = [ + migrations.AddField( + model_name='accountnotification', + name='slack_metadata', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AddField( + 
model_name='notification', + name='slack_metadata', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/src/core/api/app/notifications/migrations/0004_auto_20240701_2118.py b/src/core/api/app/notifications/migrations/0004_auto_20240701_2118.py new file mode 100644 index 00000000..65c09257 --- /dev/null +++ b/src/core/api/app/notifications/migrations/0004_auto_20240701_2118.py @@ -0,0 +1,71 @@ +# Generated by Django 3.2.20 on 2024-07-01 21:18 + +from django.db import migrations, models +import django.db.models.deletion +import notifications.models + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0039_auto_20240701_2118'), + ('notifications', '0003_auto_20230208_0834'), + ] + + operations = [ + migrations.AlterField( + model_name='accountnotification', + name='cluster_alert', + field=models.ForeignKey(blank=True, help_text='Associated cluster alert, if applicable', null=True, on_delete=django.db.models.deletion.SET_NULL, to='clusters.clusteralert'), + ), + migrations.AlterField( + model_name='accountnotification', + name='email_sent_to', + field=models.JSONField(blank=True, default=list, help_text="JSON list of user ID's the notification was sent to", null=True), + ), + migrations.AlterField( + model_name='accountnotification', + name='in_app_read_by', + field=models.JSONField(blank=True, default=list, help_text='Not used?', null=True), + ), + migrations.AlterField( + model_name='accountnotification', + name='link', + field=models.JSONField(blank=True, default=dict, help_text='Unused?', null=True), + ), + migrations.AlterField( + model_name='accountnotification', + name='slack_metadata', + field=models.JSONField(blank=True, default=dict, help_text='Slack metadata is used by slack notification messages. This metadata is set with convienance methods: set_slack_link, set_slack_message_identifier, set_slack_data ... It is not clear to me if this metadata is actually used anywhere.', null=True), + ), + migrations.AlterField( + model_name='accountnotification', + name='statuses', + field=models.JSONField(blank=True, default=notifications.models.get_default_statuses, help_text='A dictionary mapping notification channel names to the status of the message in each channel. This is documented in greater detail on the BaseNotification model.', null=True), + ), + migrations.AlterField( + model_name='notification', + name='cluster_alert', + field=models.ForeignKey(blank=True, help_text='Associated cluster alert, if applicable', null=True, on_delete=django.db.models.deletion.SET_NULL, to='clusters.clusteralert'), + ), + migrations.AlterField( + model_name='notification', + name='email_sent_to', + field=models.JSONField(blank=True, default=list, help_text="JSON list of user ID's the notification was sent to", null=True), + ), + migrations.AlterField( + model_name='notification', + name='link', + field=models.JSONField(blank=True, default=dict, help_text='Unused?', null=True), + ), + migrations.AlterField( + model_name='notification', + name='slack_metadata', + field=models.JSONField(blank=True, default=dict, help_text='Slack metadata is used by slack notification messages. This metadata is set with convienance methods: set_slack_link, set_slack_message_identifier, set_slack_data ... 
It is not clear to me if this metadata is actually used anywhere.', null=True), + ), + migrations.AlterField( + model_name='notification', + name='statuses', + field=models.JSONField(blank=True, default=notifications.models.get_default_statuses, help_text='A dictionary mapping notification channel names to the status of the message in each channel. This is documented in greater detail on the BaseNotification model.', null=True), + ), + ] diff --git a/src/core/api/app/notifications/migrations/__init__.py b/src/core/api/app/notifications/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/notifications/models.py b/src/core/api/app/notifications/models.py new file mode 100644 index 00000000..eca22e7d --- /dev/null +++ b/src/core/api/app/notifications/models.py @@ -0,0 +1,414 @@ +import logging + +from core.models import DatacovesModel +from django.conf import settings +from django.db import models, transaction +from invitations.services import EmailSender + +from lib.slack import post_slack_message + +logger = logging.getLogger(__name__) + + +def get_default_statuses(): + return {"email": "skipped", "slack": "skipped"} + + +class BaseNotification(DatacovesModel): + """Notification Bass Class + + Unfortunately, Django's model documentor doesn't pick this up on + the model list, since this has most of the model guts. We'll document + it here anyway. + + Notifications are sent through 'channels'. The statuses below apply + to all the available channels; pending means we're trying to send + the notification, processed means we've sent it, error means we failed + to send it, and skipped means we intentionally did not send to that + channel. + + The 'status' field is a JSON dictionary that maps channel names to + statuses. + + ========= + Constants + ========= + + ----------------------- + Channel Delivery Status + ----------------------- + + - STATUS_PENDING + - STATUS_PROCESSED + - STATUS_ERROR + - STATUS_SKIPPED + + ------------------ + Notification Kinds + ------------------ + + - KIND_CLUSTER - Cluster notifications + - KIND_BILLING - Billing notifications + - NOTIFICATION_KINDS - tuple of tuple pairs for use in select boxes + + ======= + Methods + ======= + + - **deactivate_channel(channel)** - Deactivates a notification type + - **set_slack_link(name, url)** - Sets the 'link' field in slack metadata + - **get_slack_link()** - Retrieves the above + - **set_slack_message_identifier(message_id)** - Sets slack Message + identifier ('message_id' key in slack metadata). This is used by + send_slack + - **get_slack_message_identifier()** - Retrieves the above + - **set_slack_data(data)** - sets slack metadata 'data' field + - **get_slack_data()** - Retrieves the above + - **send_to_channels(...)** - Abstract, must be implemented by child + - **save(send_on_save=False, ...)** - Overrides save to add a + send_on_save parameter (default false) which can be used to + automatically call send_to_channels on DB transaction commit. + - **can_send_notification(channel)** - Can we send a notification to + the given channel? If channel is None or not provided, this will + always return False. + - **send_slack()** - Sends the notification via slack. Does not check + if the notification should be sent, it just sends it. It does not + queue it. 
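    =======
    Example
    =======

    An illustrative flow (the field values here are made up)::

        n = Notification(
            kind=Notification.KIND_CLUSTER,
            title="Node disk pressure",
            body="Disk pressure detected on a worker node.",
        )
        n.save(send_on_save=True)  # queues send_to_channels() on commit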
+ """ + + STATUS_PENDING = "pending" + STATUS_PROCESSED = "processed" + STATUS_ERROR = "error" + STATUS_SKIPPED = "skipped" + + KIND_CLUSTER = "cluster" + KIND_BILLING = "billing" + + NOTIFICATION_KINDS = ( + ( + KIND_CLUSTER, + "Cluster notification", + ), + ( + KIND_BILLING, + "Billing notification", + ), + ) + + created_at = models.DateTimeField(auto_now_add=True, editable=False) + cluster_alert = models.ForeignKey( + "clusters.ClusterAlert", + on_delete=models.SET_NULL, + blank=True, + null=True, + help_text="Associated cluster alert, if applicable", + ) + title = models.CharField(max_length=400) + body = models.TextField() + link = models.JSONField(default=dict, blank=True, null=True, help_text="Unused?") + email_sent_to = models.JSONField( + default=list, + blank=True, + null=True, + help_text="JSON list of user ID's the notification was sent to", + ) + statuses = models.JSONField( + default=get_default_statuses, + null=True, + blank=True, + help_text="A dictionary mapping notification channel names to the " + "status of the message in each channel. This is documented in " + "greater detail on the BaseNotification model.", + ) + kind = models.CharField(max_length=10, choices=NOTIFICATION_KINDS) + slack_metadata = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Slack metadata is used by slack notification " + "messages. This metadata is set with convienance methods: " + "set_slack_link, set_slack_message_identifier, set_slack_data ... " + "It is not clear to me if this metadata is actually used anywhere.", + ) + + class Meta: + abstract = True + + def __str__(self): + return self.title + + @property + def status(self) -> bool: + """Returns an aggregated status of all channels; if any are in + STATUS_ERROR, this returns STATUS_ERROR. If any are in + STATUS_PENDING, this returns STATUS_PENDING. Otherwise, it returns + STATUS_PROCESSED. 
+ """ + + statuses = [v for _, v in self.statuses.items()] + if self.STATUS_ERROR in statuses: + return self.STATUS_ERROR + elif self.STATUS_PENDING in statuses: + return self.STATUS_PENDING + return self.STATUS_PROCESSED + + def deactivate_channel(self, channel: str): + """Workaround to disable a notification type.""" + statuses = self.statuses or {} + statuses[channel] = self.STATUS_PROCESSED + self.statuses = statuses + + def set_slack_link(self, name: str, url: str): + """Sets the slack metadata 'link' field to the given name and URL.""" + slack_metadata = self.slack_metadata or {} + slack_metadata["link"] = {"name": name, "url": url} + self.slack_metadata = slack_metadata + + def get_slack_link(self): + """Returns a dictionary containing keys 'name' and 'url', or an + empty dictionary if no slack link""" + + return (self.slack_metadata or {}).get("link") or {} + + def set_slack_message_identifier(self, message_id): + """Sets the 'message_id' field from slack_metadata""" + + slack_metadata = self.slack_metadata or {} + slack_metadata["message_id"] = message_id + self.slack_metadata = slack_metadata + + def get_slack_message_identifier(self): + """Returns the 'message_id' field from slack_metadata""" + + return (self.slack_metadata or {}).get("message_id") + + def set_slack_data(self, data): + """Sets the 'data' field from slack_metadata""" + slack_metadata = self.slack_metadata or {} + slack_metadata["data"] = data + self.slack_metadata = slack_metadata + + def get_slack_data(self): + """Returns the 'data' field from slack_metadata""" + return (self.slack_metadata or {}).get("data") or {} + + def send_to_channels(self, **kwargs): + """Abstract""" + raise NotImplementedError() + + def save(self, *args, send_on_save=False, **kwargs): + """Provides the send_on_save parameter; if True, we will send + the notification on transaction commit.""" + + super().save(*args, **kwargs) + if send_on_save: + transaction.on_commit(lambda: self.send_to_channels()) + + def can_send_notification(self, channel: str = None) -> bool: + """True if we can send a notification. If channel is None or + not in the 'statuses' dictionary as a key, then this returns False. + + Otherwise, it will return False if the status is already processed, + or use ClusterAlert.can_send_notification if cluster_alert is not + None. + + If it passes all those checks and doesn't have a ClusterAlert, + it will return True. + """ + + if channel is None or channel not in self.statuses: + return False + status = self.statuses.get(channel) + if status == self.STATUS_PROCESSED: + logger.info("This notification is already processed") + return False + + if self.cluster_alert: + return self.cluster_alert.can_send_notification(channel) + + return True + + def send_slack(self): + """Sends the notification via slack. This doesn't do any checking + on IF we should send the notification, it just sends it. It does + not queue it. 
+ """ + + link = self.get_slack_link() + text = link["name"] + url = link["url"] + data = self.get_slack_data() + header = { + "type": "header", + "text": { + "type": "plain_text", + "text": f":rotating_light: {self.title} :rotating_light:", + "emoji": True, + }, + } + divider = {"type": "divider"} + sections = [] + for field, value in data.items(): + value = data.get(field) + if not value: + continue + label = " ".join([c.capitalize() for c in field.split("_")]) + f = { + "type": "section", + "text": { + "type": "mrkdwn", + "text": f"*{label}*: {value}", + }, + } + sections.append(f) + link = { + "type": "actions", + "elements": [ + { + "type": "button", + "text": {"type": "plain_text", "text": text}, + "value": text, + "url": url, + } + ], + } + blocks = [header, divider, *sections, divider, link] + try: + response = post_slack_message( + settings.SLACK_CLUSTER_CHANNEL, self.title, blocks + ) + self.statuses["slack"] = self.STATUS_PROCESSED + message_id = response.data.get("ts") + self.set_slack_message_identifier(message_id) + except Exception as e: + logger.warning(f"Error trying to send slack notification: {e}") + self.statuses["slack"] = self.STATUS_ERROR + self.save() + + +class Notification(BaseNotification): + """Notification Class + + There is extensive notification in notifications.BaseNotification, but + Django's documentation generator won't include an abstract model class + in its documentation. It is recommended you read that data as it has + most of the notes about the internal functionings of Notifications. + + ======= + Methods + ======= + + - **send_to_channels(...)** - Arguments are ignored. A thin wrapper + around send_slack_notification + - **send_slack_notification()** - If the 'slack' channel is available + for notification, it will send a slack message using the queue for + async purposes. + """ + + def send_to_channels(self, **kwargs): + """This is a wrapper around send_slack_notification to implement the + abstract method from the base class""" + + self.send_slack_notification() + + def send_slack_notification(self): + """Sends a queued notification to slack""" + + from notifications.tasks import send_slack_notification + + if self.can_send_notification(channel="slack"): + self.statuses["slack"] = self.STATUS_PENDING + self.save(update_fields=["statuses"]) + send_slack_notification.delay(self.id) + + +class AccountNotification(BaseNotification): + """Account level notifications + + These are sent to account admin users via email if the email channel + is available. It will also send to account-level slack channels. + + ======= + Methods + ======= + + - **send_to_channels(...)** - Takes kwargs but ignores them. Sends + to both email and slack channels. + - **send_slack_notification()** - Sends to the account slack + - **send_email_notifications()** - Sends to the account admin emails + - **send_email()** - The implementation of the email send, without the + pre-checks done by send_email_notifications + - **can_send_notification(channel)** - Override of a base class method + in order to allow accounts to enable / disable individual notification + kinds. + """ + + account = models.ForeignKey("users.Account", on_delete=models.CASCADE) + environment = models.ForeignKey( + "projects.Environment", on_delete=models.SET_NULL, blank=True, null=True + ) + in_app_read_by = models.JSONField( + default=list, blank=True, null=True, help_text="Not used?" 
+ ) + + def send_to_channels(self, **kwargs): + """Implements the send_to_channels method from the base class, and + sends email/slack notofications.""" + + self.send_email_notifications() + self.send_slack_notification() + + def send_slack_notification(self): + # FIXME: should be use the account slack configuration from account models, this configuration + # doesn't exists yet + from notifications.tasks import send_slack_account_notification + + if self.can_send_notification(channel="slack"): + self.statuses["slack"] = self.STATUS_PENDING + self.save(update_fields=["statuses"]) + send_slack_account_notification.delay(self.id) + + def send_email_notifications(self): + """Checks to be sure the email channel is available before sending + the notification""" + + from notifications.tasks import send_email_notifications + + if self.can_send_notification(channel="email"): + self.statuses["email"] = self.STATUS_PENDING + self.save(update_fields=["statuses"]) + send_email_notifications.delay(self.id) + + def send_email(self): + """Sends the email to the admin users, using the + notifications/email/email_alert template. The only variable + provided to the template is 'body' + """ + + from users.models import Account + + users = Account.get_admin_users(slug=self.account.slug).values_list( + "id", "email" + ) + user_ids = [x[0] for x in users] + users_emails = [x[1] for x in users] + ctx = { + "body": self.body, + } + email_template = "notifications/email/email_alert" + self.email_sent_to = user_ids + try: + EmailSender.send_mail(email_template, users_emails, ctx, subject=self.title) + self.statuses["email"] = self.STATUS_PROCESSED + except Exception as e: + logger.error(f"Error trying to send notification email: {e}") + self.statuses["email"] = self.STATUS_ERROR + self.save(update_fields=["email_sent_to", "statuses"]) + + def can_send_notification(self, channel=None): + """Overriding this method to add the ability to check if + the account has enabled the notifications""" + base_can_send_notification = super().can_send_notification(channel=channel) + return ( + base_can_send_notification and self.account.notifications_enabled[self.kind] + ) diff --git a/src/core/api/app/notifications/tasks.py b/src/core/api/app/notifications/tasks.py new file mode 100644 index 00000000..b8f2cb02 --- /dev/null +++ b/src/core/api/app/notifications/tasks.py @@ -0,0 +1,35 @@ +from celery.utils.log import get_task_logger +from notifications.models import AccountNotification, Notification + +from datacoves.celery import app + +logger = get_task_logger(__name__) + + +@app.task +def send_email_notifications(account_notification_id): + logger.info( + f"Sending email notification for account notification id: {account_notification_id}" + ) + account_notification: AccountNotification = AccountNotification.objects.get( + id=account_notification_id + ) + account_notification.send_email() + + +@app.task +def send_slack_notification(notification_id): + logger.info(f"Sending slack notification for notification id: {notification_id}") + notification: Notification = Notification.objects.get(id=notification_id) + notification.send_slack() + + +@app.task +def send_slack_account_notification(account_notification_id): + logger.info( + f"Sending slack account notification for account notification id: {account_notification_id}" + ) + account_notification: AccountNotification = AccountNotification.objects.get( + id=account_notification_id + ) + account_notification.send_slack() diff --git 
a/src/core/api/app/notifications/templates/notifications/content/blocked_pod_creation_message.html b/src/core/api/app/notifications/templates/notifications/content/blocked_pod_creation_message.html new file mode 100644 index 00000000..5eaa2373 --- /dev/null +++ b/src/core/api/app/notifications/templates/notifications/content/blocked_pod_creation_message.html @@ -0,0 +1,6 @@ +

+ Hello {{ name }},

+

+ This email is to inform you that the account ({{ account_name }}) has exceeded the maximum execution time for the + selected plan. +

\ No newline at end of file diff --git a/src/core/api/app/notifications/templates/notifications/content/blocked_pod_creation_subject.txt b/src/core/api/app/notifications/templates/notifications/content/blocked_pod_creation_subject.txt new file mode 100644 index 00000000..654715dc --- /dev/null +++ b/src/core/api/app/notifications/templates/notifications/content/blocked_pod_creation_subject.txt @@ -0,0 +1,4 @@ +{% load i18n %} +{% autoescape off %} +{% blocktrans %}Your account {{ account_name }} exceeded maximum execution time{% endblocktrans %} +{% endautoescape %} \ No newline at end of file diff --git a/src/core/api/app/notifications/templates/notifications/email/email_alert_message.html b/src/core/api/app/notifications/templates/notifications/email/email_alert_message.html new file mode 100644 index 00000000..ee594c6b --- /dev/null +++ b/src/core/api/app/notifications/templates/notifications/email/email_alert_message.html @@ -0,0 +1,179 @@ +{% load i18n %} +{% autoescape off %} +{% blocktrans %} + + + + + + + + + + + + + + + + + + + + + +{% endblocktrans %} +{% endautoescape %} \ No newline at end of file diff --git a/src/core/api/app/notifications/templates/notifications/email/email_alert_subject.txt b/src/core/api/app/notifications/templates/notifications/email/email_alert_subject.txt new file mode 100644 index 00000000..253c6a7d --- /dev/null +++ b/src/core/api/app/notifications/templates/notifications/email/email_alert_subject.txt @@ -0,0 +1,4 @@ +{% load i18n %} +{% autoescape off %} +{% blocktrans %}New alert in your account {{ account_name }}{% endblocktrans %} +{% endautoescape %} \ No newline at end of file diff --git a/src/core/api/app/projects/__init__.py b/src/core/api/app/projects/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/projects/admin.py b/src/core/api/app/projects/admin.py new file mode 100644 index 00000000..75a8b47f --- /dev/null +++ b/src/core/api/app/projects/admin.py @@ -0,0 +1,631 @@ +import json +import operator +from functools import reduce + +from clusters import workspace +from core.fields import EncryptedJSONField +from django.contrib import admin, messages +from django.db import models +from django.db.models import Q +from django.utils.safestring import mark_safe +from django_json_widget.widgets import JSONEditorWidget +from django_object_actions import DjangoObjectActions +from projects.exceptions import SQLHookException +from pygments import highlight +from pygments.formatters.html import HtmlFormatter +from pygments.lexers.data import JsonLexer + +from datacoves.admin import BaseModelAdmin + +from .cryptography import generate_ssh_key_pair, generate_ssl_key_pair +from .models import ( + BlockedPodCreationRequest, + ConnectionTemplate, + ConnectionType, + Environment, + EnvironmentIntegration, + Profile, + ProfileFile, + ProfileImageSet, + Project, + Release, + Repository, + ServiceCredential, + SSHKey, + SSLKey, + UserCredential, + UserEnvironment, + UserRepository, +) +from .tasks import build_profile_image_set + + +class EnvironmentTypeFilter(admin.SimpleListFilter): + title = "Type" + parameter_name = "type" + + def lookups(self, request, model_admin): + if "project__id__exact" in request.GET: + id = request.GET["project__id__exact"] + types = set( + [env.type for env in model_admin.model.objects.all().filter(project=id)] + ) + else: + types = set([env.type for env in model_admin.model.objects.all()]) + return [(t, t) for t in types] + + def queryset(self, request, queryset): + if self.value(): + return 
queryset.filter(type__exact=self.value()) + + +class ProjectFilter(admin.SimpleListFilter): + title = "Project" + parameter_name = "project__id" + + def lookups(self, request, model_admin): + if "project__account__id" in request.GET: + id = request.GET["project__account__id"] + projects = set( + [ + env.project + for env in model_admin.model.objects.all().filter( + project__account__id=id + ) + ] + ) + else: + projects = set([env.project for env in model_admin.model.objects.all()]) + return [(p.id, p.name) for p in sorted(projects, key=lambda p: p.name)] + + def queryset(self, request, queryset): + if self.value(): + return queryset.filter(project__id__exact=self.value()) + + +class AccountByProjectFilter(admin.SimpleListFilter): + title = "Account" + parameter_name = "project__account__id" + + def lookup_allowed(self, key): + return True + + def lookups(self, request, model_admin): + if "project__id" in request.GET: + id = request.GET["project__id"] + accounts = set( + [ + env.project.account + for env in model_admin.model.objects.all().filter(project__id=id) + ] + ) + else: + accounts = set( + [env.project.account for env in model_admin.model.objects.all()] + ) + return [(a.id, a.name) for a in sorted(accounts, key=lambda a: a.name)] + + def queryset(self, request, queryset): + if self.value(): + return queryset.filter(project__account__id__exact=self.value()) + + +class AccountFilter(admin.SimpleListFilter): + title = "Account" + parameter_name = "environment__project__account" + + def lookups(self, request, model_admin): + filter_names = ( + "environment__id", + "user__id", + ) + filter_clauses = [ + Q((filter, request.GET[filter])) + for filter in filter_names + if request.GET.get(filter) + ] + + qs = model_admin.model.objects.select_related( + "environment__project__account" + ).only( + "environment__project__account__name", + "environment__project__account__slug", + ) + if filter_clauses: + accounts = set( + [ + user_env.environment.project.account + for user_env in qs.filter(reduce(operator.and_, filter_clauses)) + ] + ) + else: + accounts = set([user_env.environment.project.account for user_env in qs]) + return [ + (u.id, u.name) for u in sorted(accounts, key=lambda account: account.name) + ] + + def queryset(self, request, queryset): + if self.value(): + return queryset.filter(environment__project__account__exact=self.value()) + + +class EnvironmentFilter(admin.SimpleListFilter): + title = "Environment" + parameter_name = "environment__id" + + def lookups(self, request, model_admin): + filter_names = ( + "environment__project__account", + "user__id", + ) + filter_clauses = [ + Q((filter, request.GET[filter])) + for filter in filter_names + if request.GET.get(filter) + ] + + qs = model_admin.model.objects.select_related("environment").only( + "environment__name", "environment__slug" + ) + if filter_clauses: + envs = set( + [ + user_env.environment + for user_env in qs.filter(reduce(operator.and_, filter_clauses)) + ] + ) + else: + envs = set([user_env.environment for user_env in qs]) + return [(u.id, u.slug) for u in sorted(envs, key=lambda env: env.slug)] + + def queryset(self, request, queryset): + if self.value(): + return queryset.filter(environment__id__exact=self.value()) + + +class UserFilter(admin.SimpleListFilter): + title = "User" + parameter_name = "user__id" + + def lookups(self, request, model_admin): + filter_names = ( + "environment__project__account", + "environment__id", + "repository__id", + ) + filter_clauses = [ + Q((filter, request.GET[filter])) + for filter 
in filter_names + if request.GET.get(filter) + ] + + qs = model_admin.model.objects.select_related("user").only("user__email") + if filter_clauses: + users = set( + [ + user_env.user + for user_env in qs.filter(reduce(operator.and_, filter_clauses)) + ] + ) + else: + users = set([user_env.user for user_env in qs]) + return [(u.id, u.email) for u in sorted(users, key=lambda u: u.email)] + + def queryset(self, request, queryset): + if self.value(): + return queryset.filter(user__id__exact=self.value()) + + +class RepositoryFilter(admin.SimpleListFilter): + title = "Respository" + parameter_name = "repository__id" + + def lookups(self, request, model_admin): + if "user__id" in request.GET: + id = request.GET["user__id"] + repos = set( + [ + user_repo.repository + for user_repo in model_admin.model.objects.all().filter(user__id=id) + ] + ) + else: + repos = set( + [user_repo.repository for user_repo in model_admin.model.objects.all()] + ) + return [(r.id, r.git_url) for r in sorted(repos, key=lambda r: r.git_url)] + + def queryset(self, request, queryset): + if self.value(): + return queryset.filter(repository__id__exact=self.value()) + + +@admin.register(Project) +class ProjectAdmin(BaseModelAdmin, DjangoObjectActions, admin.ModelAdmin): + def create_permissions(self, request, obj): + obj.create_permissions() + obj.create_project_groups() + messages.add_message( + request, + messages.INFO, + "Project default groups and permissions successfully created.", + ) + + create_permissions.label = "Create Permissions" + create_permissions.short_description = "Create missing permissions for this project" + + change_actions = ("create_permissions",) + + readonly_fields = ("slug", "uid") + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + EncryptedJSONField: {"widget": JSONEditorWidget}, + } + list_display = ("account", "name", "slug", "repository", "release_branch") + list_filter = ("account",) + search_fields = ("name", "release_branch", "repository__git_url") + ordering = ("name", "slug", "account") + + +@admin.register(Environment) +class EnvironmentAdmin(BaseModelAdmin, DjangoObjectActions, admin.ModelAdmin): + def create_permissions(self, request, obj): + obj.create_permissions() + obj.create_environment_groups() + messages.add_message( + request, + messages.INFO, + "Environment default groups and permissions successfully created.", + ) + + create_permissions.label = "Create Permissions" + create_permissions.short_description = ( + "Create missing permissions for this environment" + ) + + def sync_cluster(self, request, obj): + workspace.sync(obj, "admin.sync_cluster", run_async=False) + messages.add_message( + request, messages.INFO, "Synchronization ran synchronously successfully." 
+ ) + + sync_cluster.label = "Sync Cluster" + sync_cluster.short_description = "Apply changes on Kubernetes cluster" + + change_actions = ("sync_cluster", "create_permissions") + + def formfield_for_foreignkey(self, db_field, request, **kwargs): + if db_field.name == "release": + kwargs["queryset"] = Release.objects.order_by("name") + return super().formfield_for_foreignkey(db_field, request, **kwargs) + + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + EncryptedJSONField: {"widget": JSONEditorWidget}, + } + readonly_fields = ("slug", "workspace_generation") + list_display = ( + "account", + "project", + "name", + "slug", + "release_profile", + "profile", + "release", + ) + list_filter = ((AccountByProjectFilter), (ProjectFilter), (EnvironmentTypeFilter)) + search_fields = ( + "name", + "slug", + "project__account__slug", + "project__slug", + "release__name", + ) + ordering = ("name", "project", "type") + + +@admin.register(ConnectionType) +class ConnectionTypeAdmin(BaseModelAdmin, admin.ModelAdmin): + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + list_display = ("name", "slug", "account") + + +@admin.register(ConnectionTemplate) +class ConnectionAdmin(BaseModelAdmin, admin.ModelAdmin): + formfield_overrides = { + EncryptedJSONField: {"widget": JSONEditorWidget}, + } + list_display = ( + "account", + "project", + "name", + "type", + "for_users", + "connection_user", + ) + list_filter = ("project__account", "type", "connection_user") + search_fields = ("project__account__name", "project__name", "name") + + def account(self, obj): + return obj.project.account + + account.short_description = "Account" + account.admin_order_field = "project__account" + + +@admin.register(Repository) +class RepositoryAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ("git_url", "provider") + list_filter = ("provider",) + search_fields = ("git_url",) + + +@admin.register(ServiceCredential) +class ServiceCredentialAdmin(BaseModelAdmin, admin.ModelAdmin): + formfield_overrides = { + EncryptedJSONField: {"widget": JSONEditorWidget}, + } + + def account(self, obj): + return obj.environment.project.account + + account.short_description = "Account" + account.admin_order_field = "environment__project__account" + list_display = ("account", "environment", "service", "name") + list_filter = ("environment__project__account", "service") + search_fields = ( + "name", + "service", + ) + + +@admin.register(UserCredential) +class UserCredentialAdmin(BaseModelAdmin, admin.ModelAdmin): + formfield_overrides = { + EncryptedJSONField: {"widget": JSONEditorWidget}, + models.JSONField: {"widget": JSONEditorWidget}, + } + + def account(self, obj): + return obj.environment.project.account + + account.short_description = "Account" + account.admin_order_field = "environment__project__account" + list_display = ("account", "environment", "user", "connection_template", "used_on") + list_filter = ( + (AccountFilter), + (EnvironmentFilter), + (UserFilter), + "connection_template", + ) + search_fields = ( + "name", + "used_on", + ) + + def save_model(self, request, obj, form, change): + try: + super().save_model(request, obj, form, change) + except SQLHookException as exc: + messages.error(request, exc) + + +@admin.register(SSHKey) +class SSHKeyAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ( + "id", + "key_type", + "usage", + "created_by", + "public_short", + "created_at", + ) + list_filter = ("created_by", "usage", "generated", "key_type") + search_fields = 
("created_by__name",) + + def get_changeform_initial_data(self, request): + return generate_ssh_key_pair() + + +@admin.register(SSLKey) +class SSLKeyAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ("id", "key_type", "public_short", "created_at") + list_filter = ("created_by", "usage", "generated", "key_type") + search_fields = ("created_by__name",) + + def get_changeform_initial_data(self, request): + return generate_ssl_key_pair() + + +@admin.register(UserRepository) +class UserRepositoryAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ("user", "repository", "ssh_key") + list_filter = ((UserFilter), (RepositoryFilter)) + search_fields = ("user__name", "repository__git_url") + + +@admin.register(Release) +class ReleaseAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ("name", "released_at") + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + search_fields = ("notes", "name", "code_server_libraries", "code_server_extensions") + + +class ProfileFileInline(BaseModelAdmin, admin.TabularInline): + model = ProfileFile + extra = 0 + readonly_fields = ("slug",) + + +@admin.register(Profile) +class ProfileAdmin(BaseModelAdmin, admin.ModelAdmin): + def formfield_for_foreignkey(self, db_field, request, **kwargs): + if db_field.name == "files_from": + kwargs["queryset"] = Profile.objects.order_by("name") + obj_id = request.resolver_match.kwargs.get("object_id") + if obj_id: + kwargs["queryset"] = kwargs["queryset"].exclude(id=int(obj_id)) + return super().formfield_for_foreignkey(db_field, request, **kwargs) + + list_display = ("name", "account", "files_from", "is_system_profile") + readonly_fields = ("slug", "created_by", "updated_by") + search_fields = ("name", "account__name") + inlines = [ProfileFileInline] + + def save_model(self, request, obj, form, change): + if change: + obj.updated_by = request.user + else: + obj.created_by = request.user + obj.save() + + @admin.display(boolean=True) + def is_system_profile(self, obj): + return obj.is_system_profile + + +@admin.register(ProfileImageSet) +class ProfileImageSetAdmin(BaseModelAdmin, DjangoObjectActions, admin.ModelAdmin): + def build_image_set(self, request, obj): + build_profile_image_set.delay(obj.id) + messages.add_message( + request, messages.INFO, "Images build task has been initiated." 
+ ) + + build_image_set.label = "Build Image Set" + build_image_set.short_description = "Triggers docker images build process" + + def formfield_for_foreignkey(self, db_field, request, **kwargs): + if db_field.name == "release": + kwargs["queryset"] = Release.objects.order_by("name") + return super().formfield_for_foreignkey(db_field, request, **kwargs) + + change_actions = ("build_image_set",) + exclude = ["images_logs"] + list_display = ( + "profile", + "release", + "build_code_server", + "build_dbt_core_interface", + "build_airflow", + "build_ci_basic", + "build_ci_airflow", + ) + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + readonly_fields = ("images_status", "images", "images_logs_prettified") + search_fields = ("profile__name", "release__name", "python_requirements") + + def images_logs_prettified(self, instance): + """Function to display pretty version of our images logs""" + response = json.dumps(instance.images_logs, sort_keys=True, indent=4) + formatter = HtmlFormatter(style="colorful") + response = highlight(response, JsonLexer(), formatter) + style = f"" + return mark_safe(style + response) + + images_logs_prettified.short_description = "Images logs" + + +@admin.register(UserEnvironment) +class UserEnvironmentAdmin(BaseModelAdmin, admin.ModelAdmin): + list_select_related = ("user", "environment", "environment__project__account") + + def get_queryset(self, request): + qs = super().get_queryset(request) + return qs.only( + "heartbeat_at", + "code_server_active", + "user__name", + "user__email", + "environment__name", + "environment__slug", + "environment__project__account__name", + "environment__project__account__slug", + ) + + def account(self, obj): + return obj.environment.project.account + + account.short_description = "Account" + account.admin_order_field = "environment__project__account" + + list_display = ( + "account", + "environment", + "user", + "heartbeat_at", + "code_server_active", + ) + list_filter = ( + (AccountFilter), + (EnvironmentFilter), + (UserFilter), + "code_server_active", + "heartbeat_at", + ) + search_fields = ("user__name", "environment__name") + readonly_fields = ("share_links",) + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + EncryptedJSONField: {"widget": JSONEditorWidget}, + } + + +@admin.register(EnvironmentIntegration) +class EnvironmentIntegrationAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ( + "account", + "environment", + "environment_name", + "service", + "integration", + ) + + def account(self, obj): + return obj.environment.project.account + + account.short_description = "Account" + account.admin_order_field = "environment__project__account" + + def environment_name(self, obj): + return obj.environment.name + + environment_name.short_description = "Environment name" + environment_name.admin_order_field = "environment__name" + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + search_fields = ("environment__project__account__name", "integration__name") + list_filter = ("environment__project__account", "integration__type") + + +@admin.register(BlockedPodCreationRequest) +class BlockedPodCreationRequestAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ( + "id", + "uid", + "creation_timestamp", + "kind", + "name", + "namespace", + ) + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + readonly_fields = ( + "id", + "uid", + "creation_timestamp", + "kind", + "name", + "namespace", + "request", + 
"response", + "request_uid", + ) diff --git a/src/core/api/app/projects/apps.py b/src/core/api/app/projects/apps.py new file mode 100644 index 00000000..336c5959 --- /dev/null +++ b/src/core/api/app/projects/apps.py @@ -0,0 +1,11 @@ +from django.apps import AppConfig + + +class ProjectsConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "projects" + + def ready(self): + # Import tasks so django's autoreloader detects changes to it. + from . import signals # noqa F401 + from . import tasks # noqa F401 diff --git a/src/core/api/app/projects/azure.py b/src/core/api/app/projects/azure.py new file mode 100644 index 00000000..09f01834 --- /dev/null +++ b/src/core/api/app/projects/azure.py @@ -0,0 +1,174 @@ +""" +Class to handle interfacing with Azure. In particular, this is how Azure +credentials can be converted to generate OAuth tokens for GIT checkout. +""" + +import json +import os +import subprocess +from pathlib import Path +from tempfile import TemporaryDirectory + +# How many seconds to wait before aborting Azure CLI commands? This +# needs to be a reasonable number to make sure we're not waiting forever +# for this. +AZURE_SUBPROCESS_TIMEOUT = 45 + + +class AzureDevops: + def __init__( + self, + tenant_id: str, + app_id: str, + secret: str = None, + certificate: str = None, + ): + """Initializes AzureDevops class for various operations. + + Either 'secret' or 'certificate' is required. 'secret' will trump + certificate if both are provided. + + Certificate is the public key with the private key appended. In + our objects, this is usually like: + + project_repo.azure_deploy_key.public + "\n" + + project_repo.azure_deploy_key.private + + """ + + self.tenant_id = tenant_id + self.app_id = app_id + + if secret is not None: + self.secret = secret + self.certificate = None + elif certificate is not None: + self.secret = None + self.certificate = certificate + else: + raise RuntimeError("secret or certificate must be provided") + + # This is used in a few places and is properly configured by + # _login + self.env_dict = dict(os.environ) + + def _redact_exception(self, e: subprocess.CalledProcessError): + """Redacts a CalledProcessError exception to clean out potential + secrets. + """ + + for i in range(0, len(e.cmd)): + if e.cmd[i] in (self.app_id, self.secret, self.tenant_id): + e.cmd[i] = "*REDACTED*" + + return e + + def _login(self, home: Path): + """This performs the correct variant of 'az login' for the provided + secret or certificate. + + 'home' is the directory we will use as a home directory for the + login in order to provide profile isolation. It should typically + be a temporary directory that is cleaned up after we're done + with it. + + Raises CalledProcessError on failure, which is redacted so any + password elements are removed. + """ + + if self.secret is not None: + cmd = [ + "az", + "login", + "--service-principal", + "--allow-no-subscriptions", + "--username", + self.app_id, + "--password", + self.secret, + "--tenant", + self.tenant_id, + ] + + else: + pem_path = home / "cert-tmp.pem" + pem_path.write_text(self.certificate) + + cmd = [ + "az", + "login", + "--service-principal", + "--allow-no-subscriptions", + "--username", + self.app_id, + "--password", + str(pem_path), + "--tenant", + self.tenant_id, + ] + + # Get our environment as a dictionary + self.env_dict["HOME"] = str(home) + + # Attempt the login - errors can contain the password, so let's + # catch it and filter out the password before re-throwing. 
+ try: + subprocess.run( + cmd, + timeout=AZURE_SUBPROCESS_TIMEOUT, + env=self.env_dict, + check=True, + capture_output=True, + ) + + except subprocess.CalledProcessError as e: + raise self._redact_exception(e) + + except subprocess.TimeoutExpired as e: + raise self._redact_exception(e) + + def get_access_token(self) -> dict: + """Generates an Azure access token. This returns a dictionary + with the following fields: + + accessToken: the token itself + expiresOn: expiration timestamp string + subscription: the associated subscription + tenant: the associated tenant + tokenType: string, should be 'Bearer' + + The expiresOn is in the format: '2024-09-20 20:28:47.462953' + It is a UTC timestamp. + + Raises CalledProcessError on failure, which is redacted so any + password elements are removed. + """ + + with TemporaryDirectory() as tempdir: + # First login + self._login(Path(tempdir)) + + try: + # Now try to get a token + result = subprocess.run( + [ + "az", + "account", + "get-access-token", + "--tenant", + self.tenant_id, + ], + timeout=AZURE_SUBPROCESS_TIMEOUT, + env=self.env_dict, + check=True, + capture_output=True, + text=True, + ) + + return json.loads(result.stdout) + + except subprocess.CalledProcessError as e: + raise self._redact_exception(e) + + except subprocess.TimeoutExpired as e: + raise self._redact_exception(e) diff --git a/src/core/api/app/projects/consumers.py b/src/core/api/app/projects/consumers.py new file mode 100644 index 00000000..147ca705 --- /dev/null +++ b/src/core/api/app/projects/consumers.py @@ -0,0 +1,118 @@ +import json +import logging +import uuid + +from asgiref.sync import sync_to_async +from celery import group +from channels.generic.websocket import AsyncWebsocketConsumer +from clusters.builder import WorkbenchBuilder + +from lib.utils import serialize_datetime + +from .tasks import sync_user_workloads_status + +logger = logging.getLogger(__name__) + + +class AccountConsumer(AsyncWebsocketConsumer): + async def connect(self): + self.group_name = None + self.user = self.scope["user"] + self.account_slug = self.scope["url_route"]["kwargs"]["account_slug"] + + if self.user and self.user.is_authenticated and self.account_slug: + group_name = f"workspace_user_account_slug_{self.account_slug}_user_slug_{self.user.slug}" + self.group_name = group_name + + # Join group + await self.channel_layer.group_add(self.group_name, self.channel_name) + await self.accept() + + else: + await self.close() + + async def disconnect(self, close_code): + # Leave group + if self.group_name: + await self.channel_layer.group_discard(self.group_name, self.channel_name) + + # Receive message from WebSocket + async def receive(self, text_data): + text_data_json = json.loads(text_data) + logger.debug("WEBSOCKET message received %s", text_data) + + message_type = text_data_json["message_type"] + if message_type == "env.status": + env_slugs = text_data_json["env_slugs"] + component = text_data_json["component"] + attempt = text_data_json.get("attempt", 1) + await self.env_status_check(env_slugs.split(","), component, attempt) + + elif message_type == "env.restart.code-server": + env_slug = text_data_json["env_slug"] + await self.workbench_code_server_restart(env_slug) + + elif message_type == "env.start.local-airflow": + env_slug = text_data_json["env_slug"] + await self.workbench_start_local_airflow(env_slug) + + elif message_type == "env.heartbeat": + env_slug = text_data_json["env_slug"] + await self.workbench_heartbeat(env_slug) + + else: + logger.warning("Unknown web socket message 
type %s", message_type) + + @sync_to_async + def env_status_check(self, env_slugs: list, component: str, attempt: int): + tasks = [] + for env_slug in env_slugs: + uid = str(uuid.uuid4()) + task_id = f"env.state.{env_slug}.{self.user.slug}-{uid}" + tasks.append( + sync_user_workloads_status.s( + account_slug=self.account_slug, + env_slug=env_slug, + user_slug=self.user.slug, + ).set(task_id=task_id) + ) + + if tasks: + # Execute tasks in parallel + group(tasks)(kwargs={"component": component, "attempt": attempt}) + + return tasks + + @sync_to_async + def workbench_heartbeat(self, env_slug): + WorkbenchBuilder(user=self.user, env_slug=env_slug).heartbeat() + + @sync_to_async + def workbench_code_server_restart(self, env_slug): + WorkbenchBuilder( + user=self.user, env_slug=env_slug + ).check_permissions().code_server.restart() + + @sync_to_async + def workbench_start_local_airflow(self, env_slug): + WorkbenchBuilder( + user=self.user, env_slug=env_slug + ).check_permissions().code_server.enable_local_airflow() + + # Send user notification message + async def user_notification(self, event): + message_type = event["message_type"] + message = event["message"] + payload = {"message_type": message_type, "message": json.loads(message)} + + # Send message to WebSocket + await self.send(text_data=json.dumps(payload, default=serialize_datetime)) + + # Send environment status message + async def env_status_change(self, event): + message_type = event["message_type"] + message = event["message"] + payload = {"message_type": message_type, "message": json.loads(message)} + + # Send message to WebSocket + await self.send(text_data=json.dumps(payload, default=serialize_datetime)) diff --git a/src/core/api/app/projects/cryptography.py b/src/core/api/app/projects/cryptography.py new file mode 100644 index 00000000..3c9d15b0 --- /dev/null +++ b/src/core/api/app/projects/cryptography.py @@ -0,0 +1,184 @@ +from datetime import datetime, timedelta, timezone + +from cryptography import x509 +from cryptography.hazmat.primitives import hashes, serialization +from cryptography.hazmat.primitives.asymmetric import dsa, ec, ed25519, rsa +from cryptography.hazmat.primitives.serialization import ( + load_pem_private_key, + load_ssh_private_key, +) +from cryptography.x509.oid import NameOID + +DSA_KEY_TYPE = "dsa" +ECDSA_KEY_TYPE = "ecdsa" +ED25519_KEY_TYPE = "ed25519" +RSA_KEY_TYPE = "rsa" +ED25519_SK_KEY_TYPE = "ed25519-sk" # not supported? +ECDSA_SK_KEY_TYPE = "ecdsa-sk" # not supported? 
+ + +def key_to_str(key) -> str: + if isinstance(key, ec.EllipticCurvePrivateKey): + return ECDSA_KEY_TYPE + if isinstance(key, ed25519.Ed25519PrivateKey): + return ED25519_KEY_TYPE + if isinstance(key, rsa.RSAPrivateKey): + return RSA_KEY_TYPE + if isinstance(key, dsa.DSAPrivateKey): + return DSA_KEY_TYPE + + raise ValueError(f"Unsupported key type: {type(key)}") + + +def generate_ssh_key_pair(key_type: str = ED25519_KEY_TYPE) -> dict: + """Generate private and public SSH keys""" + + if key_type not in (ED25519_KEY_TYPE, RSA_KEY_TYPE): + raise ValueError(f"Key type '{key_type}' not supported.") + + if key_type == RSA_KEY_TYPE: + private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048) + else: + private_key = ed25519.Ed25519PrivateKey.generate() + + private_bytes = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.OpenSSH, + encryption_algorithm=serialization.NoEncryption(), + ) + + return { + "private": private_bytes.decode(), + "public": _generate_public_key_from_ssh_private(private_key), + "key_type": key_type, + } + + +def generate_ssh_public_key(private: str, is_ssl=False) -> dict: + try: + private_key = load_ssh_private_key(str.encode(private), None) + except ValueError as ex: + if "Not OpenSSH" in str(ex): + private_key = load_pem_private_key(str.encode(private), None) + else: + raise ex + return { + "private": private, + "key_type": key_to_str(private_key), + "public": _generate_public_key_from_ssh_private(private_key), + } + + +def _generate_public_key_from_ssh_private(private_key) -> str: + public_key = private_key.public_key() + return public_key.public_bytes( + encoding=serialization.Encoding.OpenSSH, + format=serialization.PublicFormat.OpenSSH, + ).decode() + + +def generate_azure_keypair() -> dict: + """Generate a self-signed PEM file that can be used by Azure. + This is very similar to generate_ssl_key_pair but Azure wants a + very specific format that generate_ssl_key_pair doesn't generate. + + Making a new method seems more sensible than trying to if/else around + in the existing method overmuch. 
+ """ + + key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + ) + + subject = issuer = x509.Name( + [ + x509.NameAttribute(NameOID.COUNTRY_NAME, "US"), + x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, "California"), + x509.NameAttribute(NameOID.LOCALITY_NAME, "Thousand Oaks"), + x509.NameAttribute(NameOID.ORGANIZATION_NAME, "Datacoves"), + x509.NameAttribute(NameOID.COMMON_NAME, "Datacoves"), + ] + ) + + cert = ( + x509.CertificateBuilder() + .subject_name(subject) + .issuer_name(issuer) + .public_key(key.public_key()) + .serial_number(x509.random_serial_number()) + .not_valid_before(datetime.now(timezone.utc)) + .not_valid_after(datetime.now(timezone.utc) + timedelta(days=5 * 365)) + .sign(key, hashes.SHA256()) + ) + + return { + "private": key.private_bytes( + encoding=serialization.Encoding.PEM, + encryption_algorithm=serialization.NoEncryption(), + format=serialization.PrivateFormat.TraditionalOpenSSL, + ).decode(), + "public": cert.public_bytes( + encoding=serialization.Encoding.PEM, + ).decode(), + "key_type": RSA_KEY_TYPE, + } + + +def generate_ssl_key_pair(key_type: str = RSA_KEY_TYPE) -> dict: + """Generate private and public OpenSSL keys""" + + if key_type != RSA_KEY_TYPE: + raise ValueError(f"Key type '{key_type}' not supported.") + + private_key = rsa.generate_private_key( + public_exponent=65537, + key_size=4096, + ) + + private_bytes = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption(), + ) + + return { + "private": private_bytes.decode(), + "key_type": key_type, + "public": _generate_public_key_from_ssl_private(private_key), + } + + +def generate_ssl_public_key(private: str) -> dict: + if "-----BEGIN OPENSSH PRIVATE KEY-----" in private: + private_key = load_ssh_private_key(str.encode(private), None) + pem = private_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), + ) + private = pem.decode("utf-8") + + private_key = load_pem_private_key(str.encode(private), None) + + return { + "private": private, + "key_type": key_to_str(private_key), + "public": _generate_public_key_from_ssl_private(private_key), + } + + +def _generate_public_key_from_ssl_private(private_key) -> str: + public_key = private_key.public_key() + public_str = ( + public_key.public_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PublicFormat.SubjectPublicKeyInfo, + ) + .decode() + .strip() + ) + if public_str.startswith("--"): # strip -----BEGIN PUBLIC KEY----- + public_key_lines = public_str.split("\n") + public_str = "".join(public_key_lines[1:-1]) + return public_str diff --git a/src/core/api/app/projects/exceptions.py b/src/core/api/app/projects/exceptions.py new file mode 100644 index 00000000..791193d0 --- /dev/null +++ b/src/core/api/app/projects/exceptions.py @@ -0,0 +1,6 @@ +class HookException(Exception): + pass + + +class SQLHookException(HookException): + pass diff --git a/src/core/api/app/projects/git.py b/src/core/api/app/projects/git.py new file mode 100644 index 00000000..6fd56fae --- /dev/null +++ b/src/core/api/app/projects/git.py @@ -0,0 +1,255 @@ +import logging +import os +import tempfile +from glob import glob +from pathlib import Path +from subprocess import run as shell_run +from urllib.parse import quote, urlparse + +from django.utils import timezone +from git import Repo +from git.exc import GitCommandError 
+from rest_framework.response import Response +from rest_framework.status import HTTP_200_OK, HTTP_400_BAD_REQUEST +from tenacity import retry, stop_after_attempt, wait_fixed + +from .azure import AzureDevops + +logger = logging.getLogger(__name__) + + +def test_git_connection(data): + from .models import Project, SSHKey, UserRepository + + user_repo_id = data.get("user_repository_id") + project_id = data.get("project_id") + get_dbt_projects = data.get("get_dbt_projects") + user_repo = None + project_repo = None + branch = None + clone_strategy = "ssh_clone" + ssh_key_private = None + username = None + password = None + + if user_repo_id: + try: + user_repo = UserRepository.objects.select_related("repository").get( + id=user_repo_id + ) + except UserRepository.DoesNotExist: + data = { + "message": "User repository config not found. Please refresh " + "the page and check the SSH Key wasn't deleted." + } + return Response(data=data, status=HTTP_400_BAD_REQUEST) + git_url = user_repo.repository.git_url + ssh_key_private = user_repo.ssh_key.private + elif project_id: + project_repo = Project.objects.select_related("repository").get(id=project_id) + clone_strategy = project_repo.clone_strategy + branch = project_repo.release_branch + + if clone_strategy == "ssh_clone": + git_url = project_repo.repository.git_url + ssh_key_private = project_repo.deploy_key.private + elif clone_strategy.startswith("azure_"): + # Both azure strategies work the same + git_url = project_repo.repository.url + + if clone_strategy == project_repo.AZURE_SECRET_CLONE_STRATEGY: + az = AzureDevops( + project_repo.deploy_credentials.get("azure_tenant", ""), + project_repo.deploy_credentials.get("git_username", ""), + project_repo.deploy_credentials.get("git_password", ""), + ) + + else: + az = AzureDevops( + project_repo.deploy_credentials.get("azure_tenant", ""), + project_repo.deploy_credentials.get("git_username", ""), + None, + project_repo.azure_deploy_key.public + + "\n" + + project_repo.azure_deploy_key.private, + ) + + oauth_creds = az.get_access_token() + username = oauth_creds["accessToken"] + password = "" + + else: + git_url = project_repo.repository.url + username = project_repo.deploy_credentials.get("git_username") + password = project_repo.deploy_credentials.get("git_password") + if not username or not password: + data = {"message": "Missing git HTTP credentials in project."} + return Response(data=data, status=HTTP_400_BAD_REQUEST) + else: + # This is the scenario when the account setup wizard needs to validate git repo + git_url = data["url"] + ssh_key_private = SSHKey.objects.get(id=data["key_id"]).private + branch = data.get("branch") + + validated_at = None + data = {} + status = HTTP_400_BAD_REQUEST + try: + dbt_project_paths = try_git_clone( + clone_strategy, + git_url, + branch=branch, + ssh_key_private=ssh_key_private, + username=username, + password=password, + get_dbt_projects=get_dbt_projects, + ) + validated_at = timezone.now() + data = { + "message": "Git accessed successfully", + "dbt_project_paths": dbt_project_paths, + } + status = HTTP_200_OK + except Exception as e: + logger.debug(e) + data = {"message": "Could not connect to the Git repository", "extra": str(e)} + status = HTTP_400_BAD_REQUEST + + finally: + if user_repo and user_repo.validated_at != validated_at: + user_repo.validated_at = validated_at + user_repo.save() + elif project_repo and project_repo.validated_at != validated_at: + project_repo.validated_at = validated_at + project_repo.save() + return Response(data=data, 
status=status) + + +@retry( + stop=stop_after_attempt(3), + wait=wait_fixed(2), + reraise=True, +) +def try_git_clone( + clone_strategy, + git_url, + branch=None, + ssh_key_private=None, + username=None, + password=None, + get_dbt_projects=None, +): + logger.debug(f"Attempting to clone: {git_url} with strategy: {clone_strategy}") + dbt_project_paths = [] + if clone_strategy == "ssh_clone": + assert ( + ssh_key_private is not None + ), "Missing ssh_key_private when clone_strategy is ssh_clone" + known_hosts_path = Path("/home/abc/.ssh/known_hosts") + new_host = _keyscan_and_register_host(git_url, known_hosts_path) + else: + assert ( + username is not None + ), "Missing username when clone_strategy is http_clone" + assert ( + password is not None + ), "Missing password when clone_strategy is http_clone" + with tempfile.TemporaryDirectory() as tmp_dir: + try: + if clone_strategy == "ssh_clone": + ssh_file = _create_ssh_file(ssh_key_private, tmp_dir) + git_repo = Repo.clone_from( + git_url, + f"{tmp_dir}/repo", + env={ + "GIT_SSH_COMMAND": f'ssh -i {ssh_file.name} -o "StrictHostKeyChecking no"' + }, + depth=1, + ) + else: + encoded_pass = quote(password, safe="") + git_repo = Repo.clone_from( + git_url.replace("https://", f"https://{username}:{encoded_pass}@"), + f"{tmp_dir}/repo", + depth=1, + ) + + if branch and not git_repo.git.ls_remote("--heads", "origin", branch): + raise LookupError(f"Branch '{branch}' does not exist in repo.") + if get_dbt_projects: + repo_root_as_path = Path(git_repo.working_dir) + for dbt_project_path in glob( + f"{git_repo.working_dir}/**/dbt_project.yml" + ): + if ( + "dbt_packages" not in dbt_project_path + and "dbt_modules" not in dbt_project_path + ): + dbt_path = ( + Path(dbt_project_path).relative_to(repo_root_as_path).parent + ) + dbt_project_paths.append(dbt_path.as_posix()) + except GitCommandError as gce: + raise Exception(gce.stderr) + finally: + if clone_strategy == "ssh_clone": + # Remove the new_host from known_hosts file + with open(known_hosts_path, "w+") as known_hosts: + for line in known_hosts: + line.replace(new_host, "") + return dbt_project_paths + + +def _create_ssh_file(ssh_key_private, tmp_dir): + """ + Grab the SSHKey object and + write it's private key into a file inside the tmp_dir + """ + ssh_path = f"{tmp_dir}/id_rsa" + f = open(ssh_path, "w+") + f.write(ssh_key_private) + f.close() + os.chmod(ssh_path, 0o700) + return f + + +def _run_and_capture(args_list): + return shell_run(args_list, capture_output=True, text=True) + + +def _keyscan_and_register_host(git_url, known_hosts_path): + """ + Executes ssh-keyscan to discover the new SSH Host + if found, adds it to the known_hosts file + Acts as a workaround for fingerprint confirmation (yes/no prompt) + """ + ssh_url = f"ssh://{git_url}" if "ssh://" not in git_url else git_url + url_parsed = urlparse(ssh_url) + domain = url_parsed.hostname + port = None + try: + port = url_parsed.port + except ValueError: + pass + + if port: + output = _run_and_capture(["ssh-keyscan", "-t", "rsa", "-p", str(port), domain]) + else: + output = _run_and_capture(["ssh-keyscan", "-t", "rsa", domain]) + + if output.returncode != 0: + data = {"message": f"Failed to run ssh-keyscan. 
{output.stderr}"} + return Response(data=data, status=HTTP_400_BAD_REQUEST) + + new_host = output.stdout + + if not known_hosts_path.exists(): + known_hosts_path.parent.mkdir(parents=True, exist_ok=True) + open(known_hosts_path, "w") + + hosts = open(known_hosts_path, "r").read() + if domain not in hosts: + with open(known_hosts_path, "a") as file: + file.write(new_host) + logger.info("%s registered as a SSH known host.", domain) + return new_host diff --git a/src/core/api/app/projects/management/__init__.py b/src/core/api/app/projects/management/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/projects/management/commands/load_releases.py b/src/core/api/app/projects/management/commands/load_releases.py new file mode 100644 index 00000000..dccdacc1 --- /dev/null +++ b/src/core/api/app/projects/management/commands/load_releases.py @@ -0,0 +1,79 @@ +import logging +from os import listdir +from pathlib import Path + +from django.conf import settings +from django.core.exceptions import FieldError +from django.core.management.base import BaseCommand +from projects.models import Release + +from lib.config_files import load_yaml + +logger = logging.getLogger(__name__) + + +class Command(BaseCommand): + help = "Loads releases from a tar.gz file, receives an optional `current-release`" + " argument to filter out releases greater than current one" + + def add_arguments(self, parser): + parser.add_argument( + "--releases", + help="Path to releases directory.", + default="/tmp/releases", + ) + parser.add_argument( + "--current-release", + help="Current release as maximum version that can be loaded.", + ) + + def handle(self, *args, **options): + load_releases( + releases_dir=options["releases"], + current_release=options["current_release"], + ) + + +def load_releases(releases_dir="/tmp/releases", current_release=None): + releases_dir = Path(releases_dir) + releases = [ + f + for f in listdir(releases_dir) + if Path(releases_dir / f).is_file() + and (not f.startswith("pre") or settings.BASE_DOMAIN == "datacoveslocal.com") + ] + for release_name in sorted(releases): + release_yaml = load_yaml(releases_dir / release_name) + name = release_yaml.pop("name") + if ( + not current_release + or current_release + and name <= current_release + or name.startswith("pre") + and settings.BASE_DOMAIN == "datacoveslocal.com" + ): + try: + _, created = Release.objects.update_or_create( + name=name, defaults=release_yaml + ) + logger.info(f"Release {name} {'created' if created else 'updated'}.") + + except FieldError as e: + logger.info( + f"Release {name} has a field we do not recognize. This " + "release is incompatible with our currently checked out " + "code and will not be imported. Error: %s", + e, + ) + else: + logger.info(f"Release {name} greater than {current_release}. Skipping.") + + for release in Release.objects.prefetch_related("environments").all(): + if f"{release.name}.yaml" not in releases: + if release.environments.count() == 0 and release.clusters.count() == 0: + release.delete() + logger.info(f"Release {release} deleted.") + else: + logger.info( + f"Release {release} not deleted. Environment(s) or cluster still using it." 
+ ) diff --git a/src/core/api/app/projects/management/commands/register_environment.py b/src/core/api/app/projects/management/commands/register_environment.py new file mode 100644 index 00000000..6865d039 --- /dev/null +++ b/src/core/api/app/projects/management/commands/register_environment.py @@ -0,0 +1,54 @@ +from pathlib import Path + +from clusters.config_loader.environment import EnvironmentConfigLoader +from django.conf import settings +from django.core.management.base import BaseCommand + +from lib.config_files import load_file + + +class Command(BaseCommand): + help = "Registers a new environment by reading a yaml config file." + + def add_arguments(self, parser): + parser.add_argument( + "--config", + help="Path to the configuration directory.", + default="/tmp/config", + ) + parser.add_argument( + "--env", + help="Environment slug.", + ) + parser.add_argument( + "--user-confirm", + default="true", + help="Requires user confirmation.", + ) + + def handle(self, *args, **options): + config_dir = Path(options["config"]) + env_slug = options["env"] + env_dir = config_dir / "environments" / env_slug + req_user_confirm = options["user_confirm"].lower() in ( + "yes", + "y", + "true", + "t", + "1", + ) + + env_config = load_file(env_dir / "environment") + service_config = { + settings.SERVICE_CODE_SERVER: load_file(env_dir / "code_server"), + settings.SERVICE_AIRBYTE: load_file(env_dir / "airbyte"), + settings.SERVICE_AIRFLOW: load_file(env_dir / "airflow"), + settings.SERVICE_SUPERSET: load_file(env_dir / "superset"), + } + + EnvironmentConfigLoader.load( + env_slug=env_slug, + env_config=env_config, + service_config=service_config, + req_user_confirm=req_user_confirm, + ) diff --git a/src/core/api/app/projects/migrations/0001_initial.py b/src/core/api/app/projects/migrations/0001_initial.py new file mode 100644 index 00000000..6cf03c74 --- /dev/null +++ b/src/core/api/app/projects/migrations/0001_initial.py @@ -0,0 +1,393 @@ +# Generated by Django 3.2.6 on 2022-03-03 16:08 + +import autoslug.fields +import core.fields +from django.db import migrations, models +import django.db.models.deletion +import projects.models.connection +import projects.models.environment +import projects.models.project + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [] + + operations = [ + migrations.CreateModel( + name="Connection", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ( + "type", + models.CharField( + choices=[("snowflake", "Snowflake")], max_length=60 + ), + ), + ("name", models.CharField(max_length=130)), + ("connection_details", core.fields.EncryptedJSONField(default=dict)), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="Environment", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ( + "slug", + models.CharField( + default=projects.models.environment.generate_env_slug, + max_length=6, + unique=True, + ), + ), + ("name", models.CharField(max_length=50)), + ( + "type", + models.CharField( + choices=[("dev", "dev"), ("test", "test"), ("prod", "prod")], + default="dev", + max_length=60, + ), + ), + ("release_branch", 
models.CharField(default="main", max_length=255)), + ( + "services", + models.JSONField( + default=projects.models.environment.default_services + ), + ), + ("docker_registry", models.URLField(blank=True)), + ( + "docker_config_secret_name", + models.CharField( + default="docker-config-datacovesprivate", max_length=253 + ), + ), + ( + "docker_config", + core.fields.EncryptedJSONField( + default={ + "auths": { + "https://index.docker.io/v1/": { + "auth": "ZGF0YWNvdmVzcHJpdmF0ZTpXSzNmY296ZUp6elJtclZW", + "email": "hey+dockerhub@datacoves.com", + "password": "WK3fcozeJzzRmrVV", + "username": "datacovesprivate", + } + } + } + ), + ), + ("pomerium_config", core.fields.EncryptedJSONField(default=dict)), + ("airbyte_db_config", core.fields.EncryptedJSONField(default=dict)), + ("airbyte_logs_config", core.fields.EncryptedJSONField(default=dict)), + ("elastic_config", core.fields.EncryptedJSONField(default=dict)), + ("airflow_config", core.fields.EncryptedJSONField(default=dict)), + ( + "dbt_home_path", + models.CharField(default="transform", max_length=4096), + ), + ( + "dbt_profiles_dir", + models.CharField(default="automate", max_length=4096), + ), + ("metabase_db_config", core.fields.EncryptedJSONField(default=dict)), + ("workspace_generation", models.IntegerField(null=True)), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="Project", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("name", models.CharField(max_length=50)), + ( + "slug", + autoslug.fields.AutoSlugField( + editable=False, + populate_from=projects.models.project.project_slug, + unique=True, + ), + ), + ("release_branch", models.CharField(default="main", max_length=130)), + ( + "clone_strategy", + models.CharField( + choices=[ + ("ssh_clone", "SSH git clone"), + ("http_clone", "HTTP git clone"), + ], + default="ssh_clone", + max_length=60, + ), + ), + ( + "deploy_credentials", + core.fields.EncryptedJSONField(blank=True, default=dict, null=True), + ), + ( + "settings", + models.JSONField( + blank=True, + default=dict, + null=True, + ), + ), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="Release", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("name", models.CharField(max_length=32, unique=True)), + ("notes", models.TextField(blank=True, null=True)), + ("commit", models.CharField(max_length=100)), + ("released_at", models.DateTimeField()), + ("images", models.JSONField(default=dict)), + ("airbyte_images", models.JSONField(default=list)), + ("airflow_images", models.JSONField(default=list)), + ("ci_images", models.JSONField(default=list)), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="Repository", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("url", models.URLField(max_length=250, unique=True)), + ("git_url", models.CharField(max_length=250, unique=True)), + ( + "provider", + models.CharField( + blank=True, + choices=[ + 
("github", "Github"), + ("gitlab", "Gitlab"), + ("bitbucket", "BitBucket"), + ], + max_length=60, + null=True, + ), + ), + ], + options={ + "verbose_name_plural": "repositories", + }, + ), + migrations.CreateModel( + name="ServiceCredential", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("connection_overrides", core.fields.EncryptedJSONField(default=dict)), + ( + "service", + models.CharField( + choices=[ + ("dbt-docs", "Dbt-Docs"), + ("airbyte", "Airbyte"), + ("airflow", "Airflow"), + ("fluentd", "Fluentd"), + ("metabase", "Metabase"), + ("code-server", "Code-Server"), + ("superset", "Superset"), + ], + max_length=50, + ), + ), + ("target", models.CharField(default="main", max_length=50)), + ], + ), + migrations.CreateModel( + name="SSHKey", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ( + "key_type", + models.CharField( + choices=[ + ("dsa", "dsa"), + ("ecdsa", "ecdsa"), + ("ecdsa-sk", "ecdsa-sk"), + ("ed25519", "ed25519"), + ("ed25519-sk", "ed25519-sk"), + ("rsa", "rsa"), + ], + default="ed25519", + max_length=20, + ), + ), + ("private", models.TextField()), + ("public", models.TextField()), + ], + options={ + "verbose_name": "SSH key", + }, + ), + migrations.CreateModel( + name="UserCredential", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("connection_overrides", core.fields.EncryptedJSONField(default=dict)), + ( + "used_on", + models.JSONField( + default=projects.models.connection.default_user_credential_usages + ), + ), + ], + ), + migrations.CreateModel( + name="Version", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("name", models.CharField(max_length=32, unique=True)), + ("airbyte_version", models.CharField(max_length=32)), + ("airflow_version", models.CharField(max_length=32)), + ("code_server_version", models.CharField(max_length=32)), + ("dbt_version", models.CharField(max_length=32)), + ("metabase_version", models.CharField(max_length=32)), + ("code_server_libraries", models.JSONField(default="")), + ("code_server_extensions", models.JSONField(default="")), + ], + options={ + "abstract": False, + }, + ), + migrations.CreateModel( + name="UserRepository", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ( + "repository", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="users", + to="projects.repository", + ), + ), + ( + "ssh_key", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="users", + to="projects.sshkey", + ), + ), + ], + options={ + "verbose_name_plural": "user repositories", + }, + ), 
+ ] diff --git a/src/core/api/app/projects/migrations/0002_initial.py b/src/core/api/app/projects/migrations/0002_initial.py new file mode 100644 index 00000000..48ce26ad --- /dev/null +++ b/src/core/api/app/projects/migrations/0002_initial.py @@ -0,0 +1,107 @@ +# Generated by Django 3.2.6 on 2022-03-03 16:08 + +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('projects', '0001_initial'), + ('users', '0001_initial'), + ('clusters', '0001_initial'), + ] + + operations = [ + migrations.AddField( + model_name='userrepository', + name='user', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='repositories', to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='usercredential', + name='connection', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_credentials', to='projects.connection'), + ), + migrations.AddField( + model_name='usercredential', + name='environment', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_credentials', to='projects.environment'), + ), + migrations.AddField( + model_name='usercredential', + name='user', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='credentials', to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='servicecredential', + name='connection', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='service_credentials', to='projects.connection'), + ), + migrations.AddField( + model_name='servicecredential', + name='environment', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='service_credentials', to='projects.environment'), + ), + migrations.AddField( + model_name='release', + name='version', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='releases', to='projects.version'), + ), + migrations.AddField( + model_name='project', + name='account', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='users.account'), + ), + migrations.AddField( + model_name='project', + name='deploy_key', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='projects', to='projects.sshkey'), + ), + migrations.AddField( + model_name='project', + name='repository', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.repository'), + ), + migrations.AddField( + model_name='environment', + name='cluster', + field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='environments', to='clusters.cluster'), + ), + migrations.AddField( + model_name='environment', + name='project', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='environments', to='projects.project'), + ), + migrations.AddField( + model_name='environment', + name='release', + field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='environments', to='projects.release'), + ), + migrations.AddField( + model_name='environment', + name='version', + field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='environments', to='projects.version'), + ), + migrations.AddField( + 
model_name='connection', + name='project', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='connections', to='projects.project'), + ), + migrations.AddConstraint( + model_name='userrepository', + constraint=models.UniqueConstraint(fields=('user', 'repository'), name='User repository uniqueness'), + ), + migrations.AddConstraint( + model_name='usercredential', + constraint=models.UniqueConstraint(fields=('user', 'environment', 'used_on'), name='User credential uniqueness'), + ), + migrations.AddConstraint( + model_name='servicecredential', + constraint=models.UniqueConstraint(fields=('environment', 'service', 'target'), name='Environment service credential uniqueness'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0003_alter_servicecredential_service.py b/src/core/api/app/projects/migrations/0003_alter_servicecredential_service.py new file mode 100644 index 00000000..a8db5638 --- /dev/null +++ b/src/core/api/app/projects/migrations/0003_alter_servicecredential_service.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-03-18 13:16 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0002_initial'), + ] + + operations = [ + migrations.AlterField( + model_name='servicecredential', + name='service', + field=models.CharField(choices=[('airbyte', 'Airbyte'), ('airflow', 'Airflow'), ('code-server', 'Code-Server'), ('dbt-docs', 'Dbt-Docs'), ('metabase', 'Metabase'), ('superset', 'Superset')], max_length=50), + ), + ] diff --git a/src/core/api/app/projects/migrations/0004_auto_20220318_1753.py b/src/core/api/app/projects/migrations/0004_auto_20220318_1753.py new file mode 100644 index 00000000..6c2acf6a --- /dev/null +++ b/src/core/api/app/projects/migrations/0004_auto_20220318_1753.py @@ -0,0 +1,39 @@ +# Generated by Django 3.2.6 on 2022-03-18 17:53 + +import core.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0003_alter_servicecredential_service'), + ] + + operations = [ + migrations.RenameField( + model_name='environment', + old_name='metabase_db_config', + new_name='superset_db_config', + ), + migrations.RenameField( + model_name='version', + old_name='metabase_version', + new_name='superset_version', + ), + migrations.AddField( + model_name='environment', + name='superset_keystore_config', + field=core.fields.EncryptedJSONField(default=dict, editable=True), + ), + migrations.AddField( + model_name='release', + name='superset_images', + field=models.JSONField(default=list), + ), + migrations.AlterField( + model_name='servicecredential', + name='service', + field=models.CharField(choices=[('airbyte', 'Airbyte'), ('airflow', 'Airflow'), ('code-server', 'Code-Server'), ('dbt-docs', 'Dbt-Docs'), ('superset', 'Superset')], max_length=50), + ), + ] diff --git a/src/core/api/app/projects/migrations/0005_auto_20220318_2008.py b/src/core/api/app/projects/migrations/0005_auto_20220318_2008.py new file mode 100644 index 00000000..40173fd5 --- /dev/null +++ b/src/core/api/app/projects/migrations/0005_auto_20220318_2008.py @@ -0,0 +1,22 @@ +# Generated by Django 3.2.6 on 2022-03-18 20:08 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0004_auto_20220318_1753'), + ] + + operations = [ + migrations.RenameField( + model_name='environment', + old_name='superset_db_config', + new_name='superset_config', + ), + migrations.RemoveField( + 
model_name='environment', + name='superset_keystore_config', + ), + ] diff --git a/src/core/api/app/projects/migrations/0006_auto_20220329_1521.py b/src/core/api/app/projects/migrations/0006_auto_20220329_1521.py new file mode 100644 index 00000000..9b082e30 --- /dev/null +++ b/src/core/api/app/projects/migrations/0006_auto_20220329_1521.py @@ -0,0 +1,28 @@ +# Generated by Django 3.2.6 on 2022-03-29 15:21 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0005_auto_20220318_2008'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='dbt_docs_branch', + field=models.CharField(default='dbt-docs', max_length=130), + ), + migrations.AddField( + model_name='project', + name='dbt_docs_branch', + field=models.CharField(default='dbt-docs', max_length=130), + ), + migrations.AlterField( + model_name='environment', + name='release_branch', + field=models.CharField(default='main', max_length=130), + ), + ] diff --git a/src/core/api/app/projects/migrations/0007_auto_20220404_1508.py b/src/core/api/app/projects/migrations/0007_auto_20220404_1508.py new file mode 100644 index 00000000..4d339410 --- /dev/null +++ b/src/core/api/app/projects/migrations/0007_auto_20220404_1508.py @@ -0,0 +1,22 @@ +# Generated by Django 3.2.6 on 2022-04-04 15:08 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0006_auto_20220329_1521'), + ] + + operations = [ + migrations.RenameField( + model_name='environment', + old_name='airbyte_db_config', + new_name='airbyte_config', + ), + migrations.RemoveField( + model_name='environment', + name='airbyte_logs_config', + ), + ] diff --git a/src/core/api/app/projects/migrations/0008_auto_20220407_1512.py b/src/core/api/app/projects/migrations/0008_auto_20220407_1512.py new file mode 100644 index 00000000..8ec23b8d --- /dev/null +++ b/src/core/api/app/projects/migrations/0008_auto_20220407_1512.py @@ -0,0 +1,28 @@ +# Generated by Django 3.2.6 on 2022-04-07 15:12 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0007_auto_20220404_1508'), + ] + + operations = [ + migrations.AddField( + model_name='release', + name='airbyte_chart', + field=models.JSONField(default=dict), + ), + migrations.AddField( + model_name='release', + name='airflow_chart', + field=models.JSONField(default=dict), + ), + migrations.AddField( + model_name='release', + name='superset_chart', + field=models.JSONField(default=dict), + ), + ] diff --git a/src/core/api/app/projects/migrations/0009_alter_environment_docker_config.py b/src/core/api/app/projects/migrations/0009_alter_environment_docker_config.py new file mode 100644 index 00000000..9063d746 --- /dev/null +++ b/src/core/api/app/projects/migrations/0009_alter_environment_docker_config.py @@ -0,0 +1,20 @@ +# Generated by Django 3.2.6 on 2022-04-12 14:05 + +import core.fields +from django.db import migrations +import projects.models.environment + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0008_auto_20220407_1512'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='docker_config', + field=core.fields.EncryptedJSONField(default=projects.models.environment.default_docker_config, editable=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0010_alter_connection_type.py b/src/core/api/app/projects/migrations/0010_alter_connection_type.py new 
file mode 100644 index 00000000..2cae5b4a --- /dev/null +++ b/src/core/api/app/projects/migrations/0010_alter_connection_type.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-05-03 15:36 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0009_alter_environment_docker_config'), + ] + + operations = [ + migrations.AlterField( + model_name='connection', + name='type', + field=models.CharField(choices=[('snowflake', 'Snowflake'), ('redshift', 'Redshift'), ('bigquery', 'BigQuery')], max_length=60), + ), + ] diff --git a/src/core/api/app/projects/migrations/0011_auto_20220506_1542.py b/src/core/api/app/projects/migrations/0011_auto_20220506_1542.py new file mode 100644 index 00000000..35ca3f5c --- /dev/null +++ b/src/core/api/app/projects/migrations/0011_auto_20220506_1542.py @@ -0,0 +1,28 @@ +# Generated by Django 3.2.6 on 2022-05-06 15:42 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0010_alter_connection_type"), + ] + + operations = [ + migrations.AddField( + model_name="environment", + name="sync", + field=models.BooleanField(default=False), + ), + migrations.AlterField( + model_name="version", + name="code_server_extensions", + field=models.JSONField(default=""), + ), + migrations.AlterField( + model_name="version", + name="code_server_libraries", + field=models.JSONField(default=""), + ), + ] diff --git a/src/core/api/app/projects/migrations/0012_alter_environment_docker_registry.py b/src/core/api/app/projects/migrations/0012_alter_environment_docker_registry.py new file mode 100644 index 00000000..b1d23290 --- /dev/null +++ b/src/core/api/app/projects/migrations/0012_alter_environment_docker_registry.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-05-17 13:05 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0011_auto_20220506_1542'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='docker_registry', + field=models.CharField(blank=True, max_length=253), + ), + ] diff --git a/src/core/api/app/projects/migrations/0013_auto_20220518_1855.py b/src/core/api/app/projects/migrations/0013_auto_20220518_1855.py new file mode 100644 index 00000000..052d5d5b --- /dev/null +++ b/src/core/api/app/projects/migrations/0013_auto_20220518_1855.py @@ -0,0 +1,59 @@ +# Generated by Django 3.2.6 on 2022-05-18 18:55 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0012_alter_environment_docker_registry'), + ] + + operations = [ + migrations.RemoveField( + model_name='environment', + name='version', + ), + migrations.RemoveField( + model_name='release', + name='version', + ), + migrations.AddField( + model_name='release', + name='airbyte_version', + field=models.CharField(default='', max_length=32), + ), + migrations.AddField( + model_name='release', + name='airflow_version', + field=models.CharField(default='', max_length=32), + ), + migrations.AddField( + model_name='release', + name='code_server_extensions', + field=models.JSONField(default=dict), + ), + migrations.AddField( + model_name='release', + name='code_server_libraries', + field=models.JSONField(default=dict), + ), + migrations.AddField( + model_name='release', + name='code_server_version', + field=models.CharField(default='', max_length=32), + ), + migrations.AddField( + model_name='release', + 
name='dbt_version', + field=models.CharField(default='', max_length=32), + ), + migrations.AddField( + model_name='release', + name='superset_version', + field=models.CharField(default='', max_length=32), + ), + migrations.DeleteModel( + name='Version', + ), + ] diff --git a/src/core/api/app/projects/migrations/0014_release_channels.py b/src/core/api/app/projects/migrations/0014_release_channels.py new file mode 100644 index 00000000..9c0d3872 --- /dev/null +++ b/src/core/api/app/projects/migrations/0014_release_channels.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-05-19 00:21 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0013_auto_20220518_1855'), + ] + + operations = [ + migrations.AddField( + model_name='release', + name='channels', + field=models.JSONField(default=list), + ), + ] diff --git a/src/core/api/app/projects/migrations/0015_auto_20220520_1213.py b/src/core/api/app/projects/migrations/0015_auto_20220520_1213.py new file mode 100644 index 00000000..e20908dd --- /dev/null +++ b/src/core/api/app/projects/migrations/0015_auto_20220520_1213.py @@ -0,0 +1,24 @@ +# Generated by Django 3.2.6 on 2022-05-20 12:13 + +import core.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0014_release_channels'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='docker_config_secret_name', + field=models.CharField(blank=True, default='docker-config-datacovesprivate', max_length=253, null=True), + ), + migrations.AlterField( + model_name='project', + name='deploy_credentials', + field=core.fields.EncryptedJSONField(default=dict, editable=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0016_environment_last_sync_unmet_preconditions.py b/src/core/api/app/projects/migrations/0016_environment_last_sync_unmet_preconditions.py new file mode 100644 index 00000000..257047a6 --- /dev/null +++ b/src/core/api/app/projects/migrations/0016_environment_last_sync_unmet_preconditions.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-05-31 14:18 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0015_auto_20220520_1213'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='last_sync_unmet_preconditions', + field=models.JSONField(default=dict), + ), + ] diff --git a/src/core/api/app/projects/migrations/0017_alter_environment_last_sync_unmet_preconditions.py b/src/core/api/app/projects/migrations/0017_alter_environment_last_sync_unmet_preconditions.py new file mode 100644 index 00000000..ea4efd73 --- /dev/null +++ b/src/core/api/app/projects/migrations/0017_alter_environment_last_sync_unmet_preconditions.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-05-31 19:17 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0016_environment_last_sync_unmet_preconditions'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='last_sync_unmet_preconditions', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0018_auto_20220531_1949.py b/src/core/api/app/projects/migrations/0018_auto_20220531_1949.py new file mode 100644 index 00000000..2fe08f8e --- /dev/null +++ 
b/src/core/api/app/projects/migrations/0018_auto_20220531_1949.py @@ -0,0 +1,49 @@ +# Generated by Django 3.2.6 on 2022-05-31 19:49 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0017_alter_environment_last_sync_unmet_preconditions'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='airbyte_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AlterField( + model_name='environment', + name='airflow_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AlterField( + model_name='environment', + name='elastic_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AlterField( + model_name='environment', + name='superset_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AlterField( + model_name='project', + name='deploy_credentials', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AlterField( + model_name='servicecredential', + name='connection_overrides', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AlterField( + model_name='usercredential', + name='connection_overrides', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0019_remove_environment_release_branch.py b/src/core/api/app/projects/migrations/0019_remove_environment_release_branch.py new file mode 100644 index 00000000..a18e6406 --- /dev/null +++ b/src/core/api/app/projects/migrations/0019_remove_environment_release_branch.py @@ -0,0 +1,17 @@ +# Generated by Django 3.2.6 on 2022-06-01 14:04 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0018_auto_20220531_1949'), + ] + + operations = [ + migrations.RemoveField( + model_name='environment', + name='release_branch', + ), + ] diff --git a/src/core/api/app/projects/migrations/0020_alter_environment_docker_config.py b/src/core/api/app/projects/migrations/0020_alter_environment_docker_config.py new file mode 100644 index 00000000..1029a666 --- /dev/null +++ b/src/core/api/app/projects/migrations/0020_alter_environment_docker_config.py @@ -0,0 +1,20 @@ +# Generated by Django 3.2.6 on 2022-06-03 17:05 + +import core.fields +from django.db import migrations +import projects.models.environment + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0019_remove_environment_release_branch'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='docker_config', + field=core.fields.EncryptedJSONField(blank=True, default=projects.models.environment.default_docker_config, editable=True, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0021_remove_environment_elastic_config.py b/src/core/api/app/projects/migrations/0021_remove_environment_elastic_config.py new file mode 100644 index 00000000..6fff56ac --- /dev/null +++ b/src/core/api/app/projects/migrations/0021_remove_environment_elastic_config.py @@ -0,0 +1,17 @@ +# Generated by Django 3.2.6 on 2022-06-07 20:19 + +from django.db import migrations + + +class Migration(migrations.Migration): + + 
dependencies = [ + ('projects', '0020_alter_environment_docker_config'), + ] + + operations = [ + migrations.RemoveField( + model_name='environment', + name='elastic_config', + ), + ] diff --git a/src/core/api/app/projects/migrations/0022_auto_20220608_2108.py b/src/core/api/app/projects/migrations/0022_auto_20220608_2108.py new file mode 100644 index 00000000..62fce1e8 --- /dev/null +++ b/src/core/api/app/projects/migrations/0022_auto_20220608_2108.py @@ -0,0 +1,35 @@ +# Generated by Django 3.2.6 on 2022-06-08 21:08 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0002_user_is_service_account'), + ('projects', '0021_remove_environment_elastic_config'), + ] + + operations = [ + migrations.CreateModel( + name='ConnectionType', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('name', models.CharField(max_length=130)), + ('required_fieldsets', models.JSONField(blank=True, default=list, null=True)), + ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='connection_types', to='users.account')), + ], + ), + migrations.AddField( + model_name='connection', + name='type_id', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='connections', to='projects.connectiontype'), + ), + migrations.AddConstraint( + model_name='connectiontype', + constraint=models.UniqueConstraint(fields=('account', 'name'), name='Connection type account name uniqueness'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0023_auto_20220608_2108.py b/src/core/api/app/projects/migrations/0023_auto_20220608_2108.py new file mode 100644 index 00000000..6526a1a2 --- /dev/null +++ b/src/core/api/app/projects/migrations/0023_auto_20220608_2108.py @@ -0,0 +1,76 @@ +# Generated by Django 3.2.6 on 2022-06-08 21:08 + +from django.db import migrations + + +def account_connection_types(account, apps): + ConnectionType = apps.get_model("projects", "ConnectionType") + + ConnectionType.objects.update_or_create( + name="snowflake", + account=account, + defaults={ + "required_fieldsets": [ + [ + "account", + "username", + "password", + "database", + "warehouse", + "schema", + ] + ] + }, + ) + + ConnectionType.objects.update_or_create( + name="redshift", + account=account, + defaults={ + "required_fieldsets": [ + ["host", "port", "username", "password", "database", "schema"] + ] + }, + ) + + ConnectionType.objects.update_or_create( + name="bigquery", + account=account, + defaults={ + "required_fieldsets": [ + [ + "project", + "dataset", + "refresh_token", + "client_id", + "client_secret", + "token_uri", + ] + ] + }, + ) + + +def create_connection_types(apps, schema_editor): + Account = apps.get_model("users", "Account") + for account in Account.objects.all(): + account_connection_types(account, apps) + + Connection = apps.get_model("projects", "Connection") + ConnectionType = apps.get_model("projects", "ConnectionType") + for connection in Connection.objects.all(): + connection.type_id = ConnectionType.objects.get( + account=connection.project.account, name=connection.type + ) + connection.save() + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0022_auto_20220608_2108"), + ] + + operations = [ + 
migrations.RunPython(create_connection_types), + ] diff --git a/src/core/api/app/projects/migrations/0024_auto_20220608_2136.py b/src/core/api/app/projects/migrations/0024_auto_20220608_2136.py new file mode 100644 index 00000000..b5a9d86a --- /dev/null +++ b/src/core/api/app/projects/migrations/0024_auto_20220608_2136.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-06-08 21:36 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0023_auto_20220608_2108'), + ] + + operations = [ + migrations.RemoveField( + model_name='connection', + name='type', + ), + migrations.AlterField( + model_name='connection', + name='type_id', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='connections', to='projects.connectiontype'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0025_rename_type_id_connection_type.py b/src/core/api/app/projects/migrations/0025_rename_type_id_connection_type.py new file mode 100644 index 00000000..380904e6 --- /dev/null +++ b/src/core/api/app/projects/migrations/0025_rename_type_id_connection_type.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-06-08 21:37 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0024_auto_20220608_2136'), + ] + + operations = [ + migrations.RenameField( + model_name='connection', + old_name='type_id', + new_name='type', + ), + ] diff --git a/src/core/api/app/projects/migrations/0026_connection_for_users.py b/src/core/api/app/projects/migrations/0026_connection_for_users.py new file mode 100644 index 00000000..56e0ea56 --- /dev/null +++ b/src/core/api/app/projects/migrations/0026_connection_for_users.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-06-09 13:38 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0025_rename_type_id_connection_type'), + ] + + operations = [ + migrations.AddField( + model_name='connection', + name='for_users', + field=models.BooleanField(default=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0027_alter_sshkey_private.py b/src/core/api/app/projects/migrations/0027_alter_sshkey_private.py new file mode 100644 index 00000000..1404c78f --- /dev/null +++ b/src/core/api/app/projects/migrations/0027_alter_sshkey_private.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-06-14 19:50 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0026_connection_for_users"), + ] + + operations = [ + migrations.AlterField( + model_name="sshkey", + name="private", + field=core.fields.EncryptedTextField(editable=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0028_alter_project_deploy_key.py b/src/core/api/app/projects/migrations/0028_alter_project_deploy_key.py new file mode 100644 index 00000000..21cf1ad0 --- /dev/null +++ b/src/core/api/app/projects/migrations/0028_alter_project_deploy_key.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-06-15 15:26 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0027_alter_sshkey_private'), + ] + + operations = [ + migrations.AlterField( + model_name='project', + name='deploy_key', + field=models.ForeignKey(blank=True, null=True, 
on_delete=django.db.models.deletion.SET_NULL, related_name='projects', to='projects.sshkey'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0029_alter_repository_url.py b/src/core/api/app/projects/migrations/0029_alter_repository_url.py new file mode 100644 index 00000000..d92e3472 --- /dev/null +++ b/src/core/api/app/projects/migrations/0029_alter_repository_url.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-06-15 21:55 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0028_alter_project_deploy_key'), + ] + + operations = [ + migrations.AlterField( + model_name='repository', + name='url', + field=models.URLField(blank=True, max_length=250, null=True, unique=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0030_auto_20220627_2207.py b/src/core/api/app/projects/migrations/0030_auto_20220627_2207.py new file mode 100644 index 00000000..0dd217e0 --- /dev/null +++ b/src/core/api/app/projects/migrations/0030_auto_20220627_2207.py @@ -0,0 +1,30 @@ +# Generated by Django 3.2.6 on 2022-06-27 22:07 + +import core.fields +from django.db import migrations, models +import projects.models.environment + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0029_alter_repository_url'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='internal_services', + field=models.JSONField(default=projects.models.environment.default_internal_services), + ), + migrations.AddField( + model_name='environment', + name='minio_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AddField( + model_name='release', + name='minio_chart', + field=models.JSONField(default=dict), + ), + ] diff --git a/src/core/api/app/projects/migrations/0030_auto_20220627_2307.py b/src/core/api/app/projects/migrations/0030_auto_20220627_2307.py new file mode 100644 index 00000000..e52e7a3b --- /dev/null +++ b/src/core/api/app/projects/migrations/0030_auto_20220627_2307.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-06-27 23:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0029_alter_repository_url'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='dbt_home_path', + field=models.CharField(blank=True, default='transform', max_length=4096), + ), + migrations.AlterField( + model_name='environment', + name='dbt_profiles_dir', + field=models.CharField(blank=True, default='automate', max_length=4096), + ), + ] diff --git a/src/core/api/app/projects/migrations/0031_merge_0030_auto_20220627_2207_0030_auto_20220627_2307.py b/src/core/api/app/projects/migrations/0031_merge_0030_auto_20220627_2207_0030_auto_20220627_2307.py new file mode 100644 index 00000000..a6afad20 --- /dev/null +++ b/src/core/api/app/projects/migrations/0031_merge_0030_auto_20220627_2207_0030_auto_20220627_2307.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.6 on 2022-06-29 19:57 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0030_auto_20220627_2207'), + ('projects', '0030_auto_20220627_2307'), + ] + + operations = [ + ] diff --git a/src/core/api/app/projects/migrations/0032_environment_code_server_config.py b/src/core/api/app/projects/migrations/0032_environment_code_server_config.py new file mode 100644 index 00000000..d1a6dbc6 --- /dev/null +++ 
b/src/core/api/app/projects/migrations/0032_environment_code_server_config.py @@ -0,0 +1,22 @@ +# Generated by Django 3.2.6 on 2022-06-30 18:51 + +import core.fields +from django.db import migrations +import projects.models.environment + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0031_merge_0030_auto_20220627_2207_0030_auto_20220627_2307"), + ] + + operations = [ + migrations.AddField( + model_name="environment", + name="code_server_config", + field=core.fields.EncryptedJSONField( + blank=True, default=dict, editable=True, null=True + ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0033_auto_20220707_1724.py b/src/core/api/app/projects/migrations/0033_auto_20220707_1724.py new file mode 100644 index 00000000..b67c0bfe --- /dev/null +++ b/src/core/api/app/projects/migrations/0033_auto_20220707_1724.py @@ -0,0 +1,71 @@ +# Generated by Django 3.2.6 on 2022-07-07 17:24 + +import core.fields +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('projects', '0032_environment_code_server_config'), + ] + + operations = [ + migrations.CreateModel( + name='SSLKey', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('key_type', models.CharField(choices=[('dsa', 'dsa'), ('rsa', 'rsa')], default='rsa', max_length=20)), + ('private', core.fields.EncryptedTextField(editable=True)), + ('public', models.TextField()), + ('for_users', models.BooleanField(default=True)), + ('generated', models.BooleanField(default=True)), + ], + options={ + 'verbose_name': 'SSL key', + }, + ), + migrations.RemoveConstraint( + model_name='usercredential', + name='User credential uniqueness', + ), + migrations.AddField( + model_name='sshkey', + name='created_by', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ssh_keys', to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='sshkey', + name='for_users', + field=models.BooleanField(default=True, help_text="If for users, it can't be used on services."), + ), + migrations.AddField( + model_name='sshkey', + name='generated', + field=models.BooleanField(default=True, help_text='If not generated, it means the user provided it.'), + ), + migrations.AddConstraint( + model_name='usercredential', + constraint=models.UniqueConstraint(fields=('user', 'environment', 'connection'), name='User credential uniqueness'), + ), + migrations.AddField( + model_name='sslkey', + name='created_by', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ssl_keys', to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='servicecredential', + name='ssl_key', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='service_credentials', to='projects.sslkey'), + ), + migrations.AddField( + model_name='usercredential', + name='ssl_key', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='user_credentials', to='projects.sslkey'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0034_connection_user_credentials_save_hook.py 
b/src/core/api/app/projects/migrations/0034_connection_user_credentials_save_hook.py new file mode 100644 index 00000000..d22d32a8 --- /dev/null +++ b/src/core/api/app/projects/migrations/0034_connection_user_credentials_save_hook.py @@ -0,0 +1,20 @@ +# Generated by Django 3.2.6 on 2022-07-07 23:05 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0002_sqlhook'), + ('projects', '0033_auto_20220707_1724'), + ] + + operations = [ + migrations.AddField( + model_name='connection', + name='user_credentials_save_hook', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='codegen.sqlhook'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0035_alter_connectiontype_account.py b/src/core/api/app/projects/migrations/0035_alter_connectiontype_account.py new file mode 100644 index 00000000..d9f51128 --- /dev/null +++ b/src/core/api/app/projects/migrations/0035_alter_connectiontype_account.py @@ -0,0 +1,20 @@ +# Generated by Django 3.2.6 on 2022-07-18 19:49 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0002_user_is_service_account'), + ('projects', '0034_connection_user_credentials_save_hook'), + ] + + operations = [ + migrations.AlterField( + model_name='connectiontype', + name='account', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='connection_types', to='users.account'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0035_remove_connection_user_credentials_save_hook.py b/src/core/api/app/projects/migrations/0035_remove_connection_user_credentials_save_hook.py new file mode 100644 index 00000000..f6f9d309 --- /dev/null +++ b/src/core/api/app/projects/migrations/0035_remove_connection_user_credentials_save_hook.py @@ -0,0 +1,17 @@ +# Generated by Django 3.2.6 on 2022-07-18 15:58 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0034_connection_user_credentials_save_hook'), + ] + + operations = [ + migrations.RemoveField( + model_name='connection', + name='user_credentials_save_hook', + ), + ] diff --git a/src/core/api/app/projects/migrations/0036_environment_dbt_profiles_yml_template.py b/src/core/api/app/projects/migrations/0036_environment_dbt_profiles_yml_template.py new file mode 100644 index 00000000..f6758c6e --- /dev/null +++ b/src/core/api/app/projects/migrations/0036_environment_dbt_profiles_yml_template.py @@ -0,0 +1,20 @@ +# Generated by Django 3.2.6 on 2022-07-20 14:54 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0004_alter_template_context_type'), + ('projects', '0035_remove_connection_user_credentials_save_hook'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='dbt_profiles_yml_template', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='codegen.template'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0036_merge_20220720_1756.py b/src/core/api/app/projects/migrations/0036_merge_20220720_1756.py new file mode 100644 index 00000000..cdc306d4 --- /dev/null +++ b/src/core/api/app/projects/migrations/0036_merge_20220720_1756.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.6 on 
2022-07-20 17:56 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0035_alter_connectiontype_account'), + ('projects', '0035_remove_connection_user_credentials_save_hook'), + ] + + operations = [ + ] diff --git a/src/core/api/app/projects/migrations/0037_merge_20220721_1352.py b/src/core/api/app/projects/migrations/0037_merge_20220721_1352.py new file mode 100644 index 00000000..7609378a --- /dev/null +++ b/src/core/api/app/projects/migrations/0037_merge_20220721_1352.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.6 on 2022-07-21 13:52 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0036_environment_dbt_profiles_yml_template'), + ('projects', '0036_merge_20220720_1756'), + ] + + operations = [ + ] diff --git a/src/core/api/app/projects/migrations/0038_usercredential_name.py b/src/core/api/app/projects/migrations/0038_usercredential_name.py new file mode 100644 index 00000000..44e7a10e --- /dev/null +++ b/src/core/api/app/projects/migrations/0038_usercredential_name.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-07-21 13:53 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0037_merge_20220721_1352'), + ] + + operations = [ + migrations.AddField( + model_name='usercredential', + name='name', + field=models.CharField(default='', max_length=130), + preserve_default=False, + ), + ] diff --git a/src/core/api/app/projects/migrations/0039_auto_20220721_1404.py b/src/core/api/app/projects/migrations/0039_auto_20220721_1404.py new file mode 100644 index 00000000..c9892873 --- /dev/null +++ b/src/core/api/app/projects/migrations/0039_auto_20220721_1404.py @@ -0,0 +1,26 @@ +# Generated by Django 3.2.6 on 2022-07-21 14:04 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0038_usercredential_name'), + ] + + operations = [ + migrations.RemoveConstraint( + model_name='usercredential', + name='User credential uniqueness', + ), + migrations.AlterField( + model_name='usercredential', + name='name', + field=models.CharField(default='dev', max_length=130), + ), + migrations.AddConstraint( + model_name='usercredential', + constraint=models.UniqueConstraint(fields=('user', 'environment', 'name'), name='User credential uniqueness'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0040_alter_environment_dbt_profiles_yml_template.py b/src/core/api/app/projects/migrations/0040_alter_environment_dbt_profiles_yml_template.py new file mode 100644 index 00000000..346969e4 --- /dev/null +++ b/src/core/api/app/projects/migrations/0040_alter_environment_dbt_profiles_yml_template.py @@ -0,0 +1,20 @@ +# Generated by Django 3.2.6 on 2022-07-21 19:05 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0006_alter_template_context_type'), + ('projects', '0039_auto_20220721_1404'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='dbt_profiles_yml_template', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='environment_dbt_profiles', to='codegen.template'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0041_auto_20220722_1445.py b/src/core/api/app/projects/migrations/0041_auto_20220722_1445.py new file mode 100644 index 
00000000..c9ada08e --- /dev/null +++ b/src/core/api/app/projects/migrations/0041_auto_20220722_1445.py @@ -0,0 +1,108 @@ +# Generated by Django 3.2.6 on 2022-07-22 14:45 + +import autoslug.fields +from django.db import migrations, models +import django.db.models.deletion + + +def create_default_profile(apps, schema_editor): + Profile = apps.get_model("projects", "Profile") + Profile.objects.create(name="Default", slug="default") + + +class Migration(migrations.Migration): + + dependencies = [ + ("codegen", "0006_alter_template_context_type"), + ("projects", "0040_alter_environment_dbt_profiles_yml_template"), + ] + + operations = [ + migrations.CreateModel( + name="Profile", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("created_at", models.DateTimeField(auto_now_add=True)), + ("updated_at", models.DateTimeField(auto_now=True)), + ("name", models.CharField(max_length=32, unique=True)), + ( + "slug", + autoslug.fields.AutoSlugField( + editable=False, populate_from="name", unique=True + ), + ), + ("python_requirements", models.TextField(blank=True, null=True)), + ("dbt_sync", models.BooleanField(default=True)), + ("dbt_local_docs", models.BooleanField(default=True)), + ("mount_ssl_keys", models.BooleanField(default=True)), + ("mount_ssh_keys", models.BooleanField(default=True)), + ("mount_api_token", models.BooleanField(default=True)), + ], + options={ + "abstract": False, + }, + ), + migrations.RunPython(create_default_profile), + migrations.RemoveField( + model_name="environment", + name="code_server_config", + ), + migrations.RemoveField( + model_name="environment", + name="dbt_profiles_yml_template", + ), + migrations.CreateModel( + name="ProfileFile", + fields=[ + ( + "id", + models.BigAutoField( + auto_created=True, + primary_key=True, + serialize=False, + verbose_name="ID", + ), + ), + ("name", models.CharField(max_length=32, unique=True)), + ( + "slug", + autoslug.fields.AutoSlugField( + editable=False, populate_from="name", unique=True + ), + ), + ("mount_path", models.CharField(max_length=32, unique=True)), + ( + "profile", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to="projects.profile", + ), + ), + ( + "template", + models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + to="codegen.template", + ), + ), + ], + ), + migrations.AddField( + model_name="environment", + name="code_server_profile", + field=models.ForeignKey( + default=1, + on_delete=django.db.models.deletion.CASCADE, + to="projects.profile", + ), + preserve_default=False, + ), + ] diff --git a/src/core/api/app/projects/migrations/0042_auto_20220722_1510.py b/src/core/api/app/projects/migrations/0042_auto_20220722_1510.py new file mode 100644 index 00000000..2f6041e4 --- /dev/null +++ b/src/core/api/app/projects/migrations/0042_auto_20220722_1510.py @@ -0,0 +1,36 @@ +# Generated by Django 3.2.6 on 2022-07-22 15:10 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('codegen', '0007_auto_20220722_1510'), + ('users', '0002_user_is_service_account'), + ('projects', '0041_auto_20220722_1445'), + ] + + operations = [ + migrations.AddField( + model_name='profile', + name='account', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='users.account'), + ), + migrations.AddField( + model_name='profile', + name='is_global', + 
field=models.BooleanField(default=False, help_text='Global profiles are available on all accounts'), + ), + migrations.AlterField( + model_name='profilefile', + name='profile', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='files', to='projects.profile'), + ), + migrations.AlterField( + model_name='profilefile', + name='template', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='profile_files', to='codegen.template'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0043_alter_environment_code_server_profile.py b/src/core/api/app/projects/migrations/0043_alter_environment_code_server_profile.py new file mode 100644 index 00000000..fd8f83cc --- /dev/null +++ b/src/core/api/app/projects/migrations/0043_alter_environment_code_server_profile.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-07-22 15:30 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0042_auto_20220722_1510'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='code_server_profile', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='environments', to='projects.profile'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0044_auto_20220722_1922.py b/src/core/api/app/projects/migrations/0044_auto_20220722_1922.py new file mode 100644 index 00000000..333a4da9 --- /dev/null +++ b/src/core/api/app/projects/migrations/0044_auto_20220722_1922.py @@ -0,0 +1,22 @@ +# Generated by Django 3.2.6 on 2022-07-22 19:22 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0043_alter_environment_code_server_profile'), + ] + + operations = [ + migrations.RemoveField( + model_name='profile', + name='is_global', + ), + migrations.AlterField( + model_name='connectiontype', + name='name', + field=models.CharField(max_length=130, unique=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0045_alter_servicecredential_service.py b/src/core/api/app/projects/migrations/0045_alter_servicecredential_service.py new file mode 100644 index 00000000..360b4ffb --- /dev/null +++ b/src/core/api/app/projects/migrations/0045_alter_servicecredential_service.py @@ -0,0 +1,61 @@ +# Generated by Django 3.2.6 on 2022-07-29 16:53 + +from django.db import migrations, models + + +def associate_new_permissions(apps, schema_editor): + from django.conf import settings + + Account = apps.get_model("users", "Account") + Project = apps.get_model("projects", "Project") + Permission = apps.get_model("auth", "Permission") + ContentType = apps.get_model("contenttypes", "ContentType") + content_type = ContentType.objects.get_for_model(Account) + + for project in Project.objects.all(): + for resource in settings.WORKBENCH_RESOURCES: + w_name = f"{project.account.slug}:{project.slug}|{resource}|{settings.ACTION_WRITE}" + r_name = f"{project.account.slug}:{project.slug}|{resource}|{settings.ACTION_READ}" + Permission.objects.get_or_create( + name=w_name, + content_type=content_type, + defaults={"codename": w_name[:100]}, + ) + Permission.objects.get_or_create( + name=r_name, + content_type=content_type, + defaults={"codename": r_name[:100]}, + ) + + permissions = Permission.objects.filter( + name__contains=f":{project.slug}|workbench:local-dbt-docs" + ) + for ext_group in project.extendedgroup_set.filter(role="project_developer"): 
+ for permission in permissions: + ext_group.group.permissions.add(permission) + + +class Migration(migrations.Migration): + dependencies = [ + ("projects", "0044_auto_20220722_1922"), + ("users", "0001_initial"), + ] + + operations = [ + migrations.AlterField( + model_name="servicecredential", + name="service", + field=models.CharField( + choices=[ + ("airbyte", "Airbyte"), + ("airflow", "Airflow"), + ("code-server", "Code-Server"), + ("dbt-docs", "Dbt-Docs"), + ("local-dbt-docs", "Local-Dbt-Docs"), + ("superset", "Superset"), + ], + max_length=50, + ), + ), + migrations.RunPython(associate_new_permissions), + ] diff --git a/src/core/api/app/projects/migrations/0046_auto_20220802_1622.py b/src/core/api/app/projects/migrations/0046_auto_20220802_1622.py new file mode 100644 index 00000000..ea774908 --- /dev/null +++ b/src/core/api/app/projects/migrations/0046_auto_20220802_1622.py @@ -0,0 +1,33 @@ +# Generated by Django 3.2.6 on 2022-08-02 16:22 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0045_alter_servicecredential_service'), + ] + + operations = [ + migrations.AddField( + model_name='profile', + name='clone_repository', + field=models.BooleanField(default=True), + ), + migrations.AddField( + model_name='profile', + name='vscode_extensions', + field=models.JSONField(default=list), + ), + migrations.AddField( + model_name='profilefile', + name='execute', + field=models.BooleanField(default=False, help_text='Specifies if file should be executed, requires shebang set on file'), + ), + migrations.AddField( + model_name='profilefile', + name='override_existent', + field=models.BooleanField(default=False), + ), + ] diff --git a/src/core/api/app/projects/migrations/0047_auto_20220802_2021.py b/src/core/api/app/projects/migrations/0047_auto_20220802_2021.py new file mode 100644 index 00000000..b05da703 --- /dev/null +++ b/src/core/api/app/projects/migrations/0047_auto_20220802_2021.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-08-02 20:21 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0046_auto_20220802_1622'), + ] + + operations = [ + migrations.AddField( + model_name='usercredential', + name='validated_at', + field=models.DateTimeField(blank=True, null=True), + ), + migrations.AddField( + model_name='userrepository', + name='validated_at', + field=models.DateTimeField(blank=True, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0048_auto_20220803_2044.py b/src/core/api/app/projects/migrations/0048_auto_20220803_2044.py new file mode 100644 index 00000000..4a174bd3 --- /dev/null +++ b/src/core/api/app/projects/migrations/0048_auto_20220803_2044.py @@ -0,0 +1,24 @@ +# Generated by Django 3.2.6 on 2022-08-03 20:44 + +import core.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0047_auto_20220802_2021'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='pomerium_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AlterField( + model_name='profile', + name='vscode_extensions', + field=models.JSONField(blank=True, default=list, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0049_connection_project and name uniqueness.py b/src/core/api/app/projects/migrations/0049_connection_project and name uniqueness.py new 
file mode 100644 index 00000000..b6b70abf --- /dev/null +++ b/src/core/api/app/projects/migrations/0049_connection_project and name uniqueness.py @@ -0,0 +1,17 @@ +# Generated by Django 3.2.6 on 2022-08-09 22:05 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0048_auto_20220803_2044'), + ] + + operations = [ + migrations.AddConstraint( + model_name='connection', + constraint=models.UniqueConstraint(fields=('project', 'name'), name='Project and name uniqueness'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0050_auto_20220812_1526.py b/src/core/api/app/projects/migrations/0050_auto_20220812_1526.py new file mode 100644 index 00000000..f494a9e1 --- /dev/null +++ b/src/core/api/app/projects/migrations/0050_auto_20220812_1526.py @@ -0,0 +1,26 @@ +# Generated by Django 3.2.6 on 2022-08-12 15:26 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0049_connection_project and name uniqueness'), + ] + + operations = [ + migrations.RemoveConstraint( + model_name='servicecredential', + name='Environment service credential uniqueness', + ), + migrations.RenameField( + model_name='servicecredential', + old_name='target', + new_name='name', + ), + migrations.AddConstraint( + model_name='servicecredential', + constraint=models.UniqueConstraint(fields=('environment', 'service', 'name'), name='Environment service credential uniqueness'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0051_auto_20220815_1930.py b/src/core/api/app/projects/migrations/0051_auto_20220815_1930.py new file mode 100644 index 00000000..110430b0 --- /dev/null +++ b/src/core/api/app/projects/migrations/0051_auto_20220815_1930.py @@ -0,0 +1,37 @@ +# Generated by Django 3.2.6 on 2022-08-15 19:30 + +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0050_auto_20220812_1526'), + ] + + operations = [ + migrations.RemoveField( + model_name='sshkey', + name='for_users', + ), + migrations.RemoveField( + model_name='sslkey', + name='for_users', + ), + migrations.AddField( + model_name='servicecredential', + name='validated_at', + field=models.DateTimeField(blank=True, null=True), + ), + migrations.AddField( + model_name='sshkey', + name='project', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ssh_keys', to='projects.project'), + ), + migrations.AddField( + model_name='sslkey', + name='project', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='ssl_keys', to='projects.project'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0052_auto_20220818_1156.py b/src/core/api/app/projects/migrations/0052_auto_20220818_1156.py new file mode 100644 index 00000000..80d7ba50 --- /dev/null +++ b/src/core/api/app/projects/migrations/0052_auto_20220818_1156.py @@ -0,0 +1,41 @@ +# Generated by Django 3.2.6 on 2022-08-18 11:56 + +import autoslug.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0051_auto_20220815_1930'), + ] + + operations = [ + migrations.AlterField( + model_name='profilefile', + name='mount_path', + field=models.CharField(max_length=32), + ), + migrations.AlterField( + model_name='profilefile', + name='name', + field=models.CharField(max_length=32), + ), + 
migrations.AlterField( + model_name='profilefile', + name='slug', + field=autoslug.fields.AutoSlugField(editable=False, populate_from='name'), + ), + migrations.AddConstraint( + model_name='profilefile', + constraint=models.UniqueConstraint(fields=('profile', 'name'), name='Profile file name uniqueness'), + ), + migrations.AddConstraint( + model_name='profilefile', + constraint=models.UniqueConstraint(fields=('profile', 'slug'), name='Profile file slug uniqueness'), + ), + migrations.AddConstraint( + model_name='profilefile', + constraint=models.UniqueConstraint(fields=('profile', 'mount_path'), name='Profile file mount path uniqueness'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0053_alter_profilefile_mount_path.py b/src/core/api/app/projects/migrations/0053_alter_profilefile_mount_path.py new file mode 100644 index 00000000..20ee6d07 --- /dev/null +++ b/src/core/api/app/projects/migrations/0053_alter_profilefile_mount_path.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-08-19 14:06 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0052_auto_20220818_1156'), + ] + + operations = [ + migrations.AlterField( + model_name='profilefile', + name='mount_path', + field=models.CharField(max_length=250), + ), + ] diff --git a/src/core/api/app/projects/migrations/0053_auto_20220822_1242.py b/src/core/api/app/projects/migrations/0053_auto_20220822_1242.py new file mode 100644 index 00000000..6ba6d01b --- /dev/null +++ b/src/core/api/app/projects/migrations/0053_auto_20220822_1242.py @@ -0,0 +1,27 @@ +# Generated by Django 3.2.6 on 2022-08-22 12:42 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0052_auto_20220818_1156'), + ] + + operations = [ + migrations.RemoveField( + model_name='project', + name='dbt_docs_branch', + ), + migrations.AddField( + model_name='project', + name='ci_home_url', + field=models.URLField(blank=True, max_length=250, null=True), + ), + migrations.AddField( + model_name='project', + name='ci_provider', + field=models.CharField(blank=True, choices=[('github', 'GitHub'), ('gitlab', 'Gitlab'), ('bamboo', 'Bamboo'), ('jenkins', 'Jenkins'), ('circleci', 'CircleCI')], max_length=50, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0054_auto_20220822_1610.py b/src/core/api/app/projects/migrations/0054_auto_20220822_1610.py new file mode 100644 index 00000000..9b95995d --- /dev/null +++ b/src/core/api/app/projects/migrations/0054_auto_20220822_1610.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-08-22 16:10 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0053_auto_20220822_1242'), + ] + + operations = [ + migrations.AddField( + model_name='project', + name='validated_at', + field=models.DateTimeField(blank=True, null=True), + ), + migrations.AlterField( + model_name='project', + name='ci_provider', + field=models.CharField(blank=True, choices=[('github', 'GitHub'), ('gitlab', 'Gitlab'), ('bamboo', 'Bamboo'), ('jenkins', 'Jenkins'), ('circleci', 'CircleCI'), ('other', 'Other')], max_length=50, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0055_merge_20220826_1326.py b/src/core/api/app/projects/migrations/0055_merge_20220826_1326.py new file mode 100644 index 00000000..2ea4accd --- /dev/null +++ b/src/core/api/app/projects/migrations/0055_merge_20220826_1326.py @@ -0,0 +1,14 @@ +# 
Generated by Django 3.2.6 on 2022-08-26 13:26 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0053_alter_profilefile_mount_path'), + ('projects', '0054_auto_20220822_1610'), + ] + + operations = [ + ] diff --git a/src/core/api/app/projects/migrations/0056_auto_20220826_1507.py b/src/core/api/app/projects/migrations/0056_auto_20220826_1507.py new file mode 100644 index 00000000..4419284a --- /dev/null +++ b/src/core/api/app/projects/migrations/0056_auto_20220826_1507.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-08-26 15:07 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0055_merge_20220826_1326'), + ] + + operations = [ + migrations.RemoveField( + model_name='environment', + name='dbt_docs_branch', + ), + migrations.AddField( + model_name='environment', + name='dbt_docs_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0057_auto_20220826_2147.py b/src/core/api/app/projects/migrations/0057_auto_20220826_2147.py new file mode 100644 index 00000000..152ceaf5 --- /dev/null +++ b/src/core/api/app/projects/migrations/0057_auto_20220826_2147.py @@ -0,0 +1,33 @@ +# Generated by Django 3.2.6 on 2022-08-26 21:47 + +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion +import django.utils.timezone + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('projects', '0056_auto_20220826_1507'), + ] + + operations = [ + migrations.CreateModel( + name='UserEnvironment', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('heartbeat_at', models.DateTimeField(default=django.utils.timezone.now)), + ('code_server_active', models.BooleanField(default=True)), + ('environment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='projects.environment')), + ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), + ], + ), + migrations.AddConstraint( + model_name='userenvironment', + constraint=models.UniqueConstraint(fields=('environment', 'user'), name='Environment user uniqueness'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0058_auto_20220829_1848.py b/src/core/api/app/projects/migrations/0058_auto_20220829_1848.py new file mode 100644 index 00000000..21fc0090 --- /dev/null +++ b/src/core/api/app/projects/migrations/0058_auto_20220829_1848.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-08-29 18:48 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0057_auto_20220826_2147'), + ] + + operations = [ + migrations.AddField( + model_name='userenvironment', + name='code_server_access', + field=models.CharField(choices=[('private', 'private'), ('authenticated', 'authenticated'), ('public', 'public')], default='private', help_text='Who can access code-server? 
Change with caution as this configuration may expose sensitive information.', max_length=50), + ), + migrations.AddField( + model_name='userenvironment', + name='code_server_last_shared_at', + field=models.DateTimeField(blank=True, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0059_auto_20220829_2302.py b/src/core/api/app/projects/migrations/0059_auto_20220829_2302.py new file mode 100644 index 00000000..46da00bf --- /dev/null +++ b/src/core/api/app/projects/migrations/0059_auto_20220829_2302.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-08-29 23:02 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0058_auto_20220829_1848'), + ] + + operations = [ + migrations.AddField( + model_name='userenvironment', + name='services', + field=models.JSONField(blank=True, default=dict, help_text="Dict of http services listening on code server pod, i.e. 'django': {'port': 3000, 'access': 'private'}", null=True), + ), + migrations.AlterField( + model_name='userenvironment', + name='code_server_last_shared_at', + field=models.DateTimeField(blank=True, help_text='For security reasons, access will be changed back to private after 2 hours elapsed from this datetime', null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0060_auto_20220831_0356.py b/src/core/api/app/projects/migrations/0060_auto_20220831_0356.py new file mode 100644 index 00000000..5be25d7a --- /dev/null +++ b/src/core/api/app/projects/migrations/0060_auto_20220831_0356.py @@ -0,0 +1,31 @@ +# Generated by Django 3.2.6 on 2022-08-31 03:56 + +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('projects', '0059_auto_20220829_2302'), + ] + + operations = [ + migrations.AddField( + model_name='userenvironment', + name='code_server_share_code', + field=models.CharField(blank=True, max_length=10, null=True, unique=True), + ), + migrations.AlterField( + model_name='userenvironment', + name='environment', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_environments', to='projects.environment'), + ), + migrations.AlterField( + model_name='userenvironment', + name='user', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_environments', to=settings.AUTH_USER_MODEL), + ), + ] diff --git a/src/core/api/app/projects/migrations/0061_auto_20220905_1539.py b/src/core/api/app/projects/migrations/0061_auto_20220905_1539.py new file mode 100644 index 00000000..e2eb2a3a --- /dev/null +++ b/src/core/api/app/projects/migrations/0061_auto_20220905_1539.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-09-05 15:39 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0060_auto_20220831_0356'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='settings', + field=models.JSONField(blank=True, default=dict, null=True), + ), + migrations.AlterField( + model_name='project', + name='settings', + field=models.JSONField(blank=True, default=dict, help_text='Settings propagated to all environment settings. 
Avoid reading this field, instead, read environment settings.', null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0062_userenvironment_code_server_restarted_at.py b/src/core/api/app/projects/migrations/0062_userenvironment_code_server_restarted_at.py new file mode 100644 index 00000000..2d97d54a --- /dev/null +++ b/src/core/api/app/projects/migrations/0062_userenvironment_code_server_restarted_at.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-09-05 17:26 + +from django.db import migrations, models +import django.utils.timezone + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0061_auto_20220905_1539'), + ] + + operations = [ + migrations.AddField( + model_name='userenvironment', + name='code_server_restarted_at', + field=models.DateTimeField(default=django.utils.timezone.now), + ), + ] diff --git a/src/core/api/app/projects/migrations/0063_auto_20220909_1631.py b/src/core/api/app/projects/migrations/0063_auto_20220909_1631.py new file mode 100644 index 00000000..14e87bd7 --- /dev/null +++ b/src/core/api/app/projects/migrations/0063_auto_20220909_1631.py @@ -0,0 +1,25 @@ +# Generated by Django 3.2.6 on 2022-09-09 16:31 + +import autoslug.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0062_userenvironment_code_server_restarted_at'), + ] + + operations = [ + migrations.AddField( + model_name='connectiontype', + name='slug', + field=autoslug.fields.AutoSlugField(default='snowflake', editable=False, populate_from='name'), + preserve_default=False, + ), + migrations.AlterField( + model_name='connectiontype', + name='name', + field=models.CharField(max_length=130), + ), + ] diff --git a/src/core/api/app/projects/migrations/0064_auto_20220912_1231.py b/src/core/api/app/projects/migrations/0064_auto_20220912_1231.py new file mode 100644 index 00000000..2b9efa53 --- /dev/null +++ b/src/core/api/app/projects/migrations/0064_auto_20220912_1231.py @@ -0,0 +1,26 @@ +# Generated by Django 3.2.6 on 2022-09-12 12:31 + +from django.db import migrations, models +import django.db.models.deletion +import projects.models.environment + + +class Migration(migrations.Migration): + + dependencies = [ + ('clusters', '0013_auto_20220728_1450'), + ('projects', '0063_auto_20220909_1631'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='cluster', + field=models.ForeignKey(default=projects.models.environment.default_cluster, on_delete=django.db.models.deletion.PROTECT, related_name='environments', to='clusters.cluster'), + ), + migrations.AlterField( + model_name='environment', + name='release', + field=models.ForeignKey(default=projects.models.environment.default_release, on_delete=django.db.models.deletion.PROTECT, related_name='environments', to='projects.release'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0065_alter_environment_code_server_profile.py b/src/core/api/app/projects/migrations/0065_alter_environment_code_server_profile.py new file mode 100644 index 00000000..a23f5d55 --- /dev/null +++ b/src/core/api/app/projects/migrations/0065_alter_environment_code_server_profile.py @@ -0,0 +1,20 @@ +# Generated by Django 3.2.6 on 2022-09-12 13:58 + +from django.db import migrations, models +import django.db.models.deletion +import projects.models.environment + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0064_auto_20220912_1231'), + ] + + operations = [ + migrations.AlterField( + 
model_name='environment', + name='code_server_profile', + field=models.ForeignKey(default=projects.models.environment.default_profile, on_delete=django.db.models.deletion.CASCADE, related_name='environments', to='projects.profile'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0066_connection_connection_user.py b/src/core/api/app/projects/migrations/0066_connection_connection_user.py new file mode 100644 index 00000000..4140dbb8 --- /dev/null +++ b/src/core/api/app/projects/migrations/0066_connection_connection_user.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-09-14 20:30 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0065_alter_environment_code_server_profile'), + ] + + operations = [ + migrations.AddField( + model_name='connection', + name='connection_user', + field=models.CharField(choices=[('provided', 'User provided'), ('email_username', "Inferred from email's username")], default='provided', max_length=20), + ), + ] diff --git a/src/core/api/app/projects/migrations/0067_auto_20220928_1611.py b/src/core/api/app/projects/migrations/0067_auto_20220928_1611.py new file mode 100644 index 00000000..f99ee779 --- /dev/null +++ b/src/core/api/app/projects/migrations/0067_auto_20220928_1611.py @@ -0,0 +1,55 @@ +# Generated by Django 3.2.6 on 2022-09-28 16:11 + +import autoslug.fields +import django.db.models.deletion +import projects.models.connection +import projects.models.profile +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('integrations', '0001_initial'), + ('projects', '0066_connection_connection_user'), + ] + + operations = [ + migrations.CreateModel( + name='EnvironmentIntegration', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('service', models.CharField(choices=[('airbyte', 'Airbyte'), ('airflow', 'Airflow'), ('code-server', 'Code-Server'), ('dbt-docs', 'Dbt-Docs'), ('local-dbt-docs', 'Local-Dbt-Docs'), ('superset', 'Superset')], max_length=50)), + ('settings', models.JSONField(blank=True, default=dict, help_text='Specific configuration for the service that uses the integration', null=True)), + ], + options={ + 'abstract': False, + }, + ), + migrations.RemoveConstraint( + model_name='connectiontype', + name='Connection type account name uniqueness', + ), + migrations.AlterField( + model_name='connectiontype', + name='slug', + field=autoslug.fields.AutoSlugField(editable=False, populate_from=projects.models.connection.connectiontype_slug, unique=True), + ), + migrations.AlterField( + model_name='profile', + name='slug', + field=autoslug.fields.AutoSlugField(editable=False, populate_from=projects.models.profile.profile_slug, unique=True), + ), + migrations.AddField( + model_name='environmentintegration', + name='environment', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='integrations', to='projects.environment'), + ), + migrations.AddField( + model_name='environmentintegration', + name='integration', + field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='environments', to='integrations.integration'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0068_environment_quotas.py b/src/core/api/app/projects/migrations/0068_environment_quotas.py new file mode 100644 
index 00000000..5aa0656b --- /dev/null +++ b/src/core/api/app/projects/migrations/0068_environment_quotas.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-10-18 12:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0067_auto_20220928_1611'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='quotas', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0069_remove_environment_last_sync_unmet_preconditions.py b/src/core/api/app/projects/migrations/0069_remove_environment_last_sync_unmet_preconditions.py new file mode 100644 index 00000000..f1c6ecf6 --- /dev/null +++ b/src/core/api/app/projects/migrations/0069_remove_environment_last_sync_unmet_preconditions.py @@ -0,0 +1,17 @@ +# Generated by Django 3.2.6 on 2022-11-25 17:26 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0068_environment_quotas'), + ] + + operations = [ + migrations.RemoveField( + model_name='environment', + name='last_sync_unmet_preconditions', + ), + ] diff --git a/src/core/api/app/projects/migrations/0070_blockedpodcreationrequest.py b/src/core/api/app/projects/migrations/0070_blockedpodcreationrequest.py new file mode 100644 index 00000000..ad08091e --- /dev/null +++ b/src/core/api/app/projects/migrations/0070_blockedpodcreationrequest.py @@ -0,0 +1,34 @@ +# Generated by Django 3.2.6 on 2022-11-10 04:34 + +import uuid + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0069_remove_environment_last_sync_unmet_preconditions'), + ] + + operations = [ + migrations.CreateModel( + name='BlockedPodCreationRequest', + fields=[ + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)), + ('request', models.JSONField(blank=True, default=dict, null=True)), + ('response', models.JSONField(blank=True, default=dict, null=True)), + ('request_uid', models.UUIDField(blank=True, null=True)), + ('uid', models.UUIDField(blank=True, null=True)), + ('creation_timestamp', models.DateTimeField(blank=True, null=True)), + ('kind', models.CharField(blank=True, max_length=200, null=True)), + ('name', models.CharField(blank=True, max_length=200, null=True)), + ('namespace', models.CharField(blank=True, max_length=200, null=True)), + ], + options={ + 'abstract': False, + }, + ), + ] diff --git a/src/core/api/app/projects/migrations/0071_auto_20221228_1642.py b/src/core/api/app/projects/migrations/0071_auto_20221228_1642.py new file mode 100644 index 00000000..19d1fba0 --- /dev/null +++ b/src/core/api/app/projects/migrations/0071_auto_20221228_1642.py @@ -0,0 +1,76 @@ +# Generated by Django 3.2.6 on 2022-12-28 16:42 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0070_blockedpodcreationrequest'), + ] + + operations = [ + migrations.RenameField( + model_name='environment', + old_name='code_server_profile', + new_name='profile', + ), + migrations.RemoveField( + model_name='profile', + name='python_requirements', + ), + migrations.RemoveField( + model_name='profile', + name='vscode_extensions', + ), + migrations.AlterField( + model_name='profile', + 
name='clone_repository', + field=models.BooleanField(default=True, help_text='When enabled, the project git repository gets cloned automatically'), + ), + migrations.AlterField( + model_name='profile', + name='dbt_local_docs', + field=models.BooleanField(default=True, help_text='If enabled, a web server is launched to serve local dbt docs'), + ), + migrations.AlterField( + model_name='profile', + name='dbt_sync', + field=models.BooleanField(default=True, help_text='If enabled, dbt osmosis gets installed as a requirement of the datacoves power user extension'), + ), + migrations.AlterField( + model_name='profile', + name='mount_api_token', + field=models.BooleanField(default=True, help_text='If enabled, an api_token is mounted as an environment variable'), + ), + migrations.AlterField( + model_name='profile', + name='mount_ssh_keys', + field=models.BooleanField(default=True, help_text='When enabled, ssh keys are mounted under /config/.ssh/'), + ), + migrations.AlterField( + model_name='profile', + name='mount_ssl_keys', + field=models.BooleanField(default=True, help_text='When enabled, ssl keys are mounted under /config/.ssl/'), + ), + migrations.AlterField( + model_name='profilefile', + name='override_existent', + field=models.BooleanField(default=False, help_text='When enabled, if a file is found, it will be overwritten.'), + ), + migrations.CreateModel( + name='ProfileImageSet', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('python_requirements', models.JSONField(blank=True, default=list, help_text='List of python libs to be used in both airflow and code server images, e.g. ["Django==3.2.6"]', null=True)), + ('airflow_requirements', models.JSONField(blank=True, default=list, help_text='List of python libs to be used in airflow images, e.g. ["Django==3.2.6"]', null=True)), + ('code_server_requirements', models.JSONField(blank=True, default=list, help_text='List of python libs to be used in code server images, e.g. ["Django==3.2.6"]', null=True)), + ('code_server_extensions', models.JSONField(blank=True, default=list, help_text='List of urls to vscode extensions that will be downloaded, unzipped and installed.', null=True)), + ('images_status', models.JSONField(blank=True, default=dict, help_text='A dictionary mapping docker image names to their build status.', null=True)), + ('images', models.JSONField(blank=True, default=dict, help_text="A dictionary mapping docker image names to tags (versions). 
If empty, it means the build process didn't complete", null=True)), + ('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='image_sets', to='projects.profile')), + ('release', models.ForeignKey(help_text='Release that contains images on which new images will be based.', on_delete=django.db.models.deletion.PROTECT, to='projects.release')), + ], + ), + ] diff --git a/src/core/api/app/projects/migrations/0072_auto_20230118_1811.py b/src/core/api/app/projects/migrations/0072_auto_20230118_1811.py new file mode 100644 index 00000000..b468ceb0 --- /dev/null +++ b/src/core/api/app/projects/migrations/0072_auto_20230118_1811.py @@ -0,0 +1,31 @@ +# Generated by Django 3.2.16 on 2023-01-18 18:11 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0071_auto_20221228_1642'), + ] + + operations = [ + migrations.RemoveField( + model_name='sshkey', + name='project', + ), + migrations.RemoveField( + model_name='sslkey', + name='project', + ), + migrations.AddField( + model_name='sshkey', + name='usage', + field=models.CharField(choices=[('user', 'User'), ('project', 'Project')], default='user', max_length=20), + ), + migrations.AddField( + model_name='sslkey', + name='usage', + field=models.CharField(choices=[('user', 'User'), ('project', 'Project')], default='user', max_length=20), + ), + ] diff --git a/src/core/api/app/projects/migrations/0073_profile_files_from.py b/src/core/api/app/projects/migrations/0073_profile_files_from.py new file mode 100644 index 00000000..8129276b --- /dev/null +++ b/src/core/api/app/projects/migrations/0073_profile_files_from.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.16 on 2023-02-06 15:09 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0072_auto_20230118_1811'), + ] + + operations = [ + migrations.AddField( + model_name='profile', + name='files_from', + field=models.ForeignKey(blank=True, help_text='Profile used as starting point for files configuration. 
Files added to current profile are appended to the base profile files list.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='projects.profile'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0074_auto_20230222_1322.py b/src/core/api/app/projects/migrations/0074_auto_20230222_1322.py new file mode 100644 index 00000000..452a8ef2 --- /dev/null +++ b/src/core/api/app/projects/migrations/0074_auto_20230222_1322.py @@ -0,0 +1,38 @@ +# Generated by Django 3.2.16 on 2023-02-22 13:22 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("codegen", "0016_auto_20230206_1930"), + ("projects", "0073_profile_files_from"), + ] + + operations = [ + migrations.AddField( + model_name="connection", + name="connection_user_template", + field=models.ForeignKey( + blank=True, + null=True, + on_delete=django.db.models.deletion.SET_NULL, + to="codegen.template", + ), + ), + migrations.AlterField( + model_name="connection", + name="connection_user", + field=models.CharField( + choices=[ + ("provided", "User provided"), + ("email_username", "Inferred from email's username"), + ("template", "Inferred from user info using a custom template"), + ], + default="provided", + max_length=20, + ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0075_auto_20230223_1835.py b/src/core/api/app/projects/migrations/0075_auto_20230223_1835.py new file mode 100644 index 00000000..4888a027 --- /dev/null +++ b/src/core/api/app/projects/migrations/0075_auto_20230223_1835.py @@ -0,0 +1,57 @@ +# Generated by Django 3.2.16 on 2023-02-23 18:35 + +import core.fields +import django.db.models.deletion +from django.db import migrations, models + + +def update_permissions(apps, schema_editor): + Permission = apps.get_model("auth", "Permission") + for permission in Permission.objects.filter(name__icontains="admin:connections|"): + permission.name = permission.name.replace( + "admin:connections", "admin:connectiontemplates" + ) + permission.save() + + +class Migration(migrations.Migration): + dependencies = [ + ("codegen", "0017_alter_template_format"), + ("projects", "0074_auto_20230222_1322"), + ] + + operations = [ + migrations.RenameModel( + old_name="Connection", + new_name="ConnectionTemplate", + ), + migrations.RenameField( + model_name="servicecredential", + old_name="connection", + new_name="connection_template", + ), + migrations.RenameField( + model_name="usercredential", + old_name="connection", + new_name="connection_template", + ), + migrations.AlterField( + model_name="connectiontemplate", + name="project", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="connection_templates", + to="projects.project", + ), + ), + migrations.AlterField( + model_name="connectiontemplate", + name="type", + field=models.ForeignKey( + on_delete=django.db.models.deletion.CASCADE, + related_name="connection_templates", + to="projects.connectiontype", + ), + ), + migrations.RunPython(update_permissions), + ] diff --git a/src/core/api/app/projects/migrations/0076_update_account_permissions.py b/src/core/api/app/projects/migrations/0076_update_account_permissions.py new file mode 100644 index 00000000..bccd6477 --- /dev/null +++ b/src/core/api/app/projects/migrations/0076_update_account_permissions.py @@ -0,0 +1,51 @@ +# Generated by Django 3.2.16 on 2023-02-23 18:35 + +from django.conf import settings +from django.db import migrations + + +def create_permissions(apps, account): + 
ContentType = apps.get_model("contenttypes", "ContentType") + Permission = apps.get_model("auth", "Permission") + content_type = ContentType.objects.get(app_label="users", model="account") + for resource in settings.ACCOUNT_RESOURCES: + w_name = f"{account.slug}|{resource}|{settings.ACTION_WRITE}" + r_name = f"{account.slug}|{resource}|{settings.ACTION_READ}" + Permission.objects.get_or_create( + name=w_name, + content_type=content_type, + defaults={"codename": w_name[:100]}, + ) + Permission.objects.get_or_create( + name=r_name, + content_type=content_type, + defaults={"codename": r_name[:100]}, + ) + + +def reassign_account_groups_permissions(apps, account): + ExtendedGroup = apps.get_model("users", "ExtendedGroup") + Permission = apps.get_model("auth", "Permission") + ext_group = ExtendedGroup.objects.get(account=account, role="account_admin") + if ext_group: + for permission in Permission.objects.filter( + name__startswith=f"{account.slug}|" + ): + ext_group.group.permissions.add(permission) + + +def update_account_permissions(apps, schema_editor): + Account = apps.get_model("users", "Account") + for account in Account.objects.all(): + create_permissions(apps, account) + reassign_account_groups_permissions(apps, account) + + +class Migration(migrations.Migration): + dependencies = [ + ("projects", "0075_auto_20230223_1835"), + ] + + operations = [ + migrations.RunPython(update_account_permissions), + ] diff --git a/src/core/api/app/projects/migrations/0077_alter_userenvironment_services.py b/src/core/api/app/projects/migrations/0077_alter_userenvironment_services.py new file mode 100644 index 00000000..254df3a5 --- /dev/null +++ b/src/core/api/app/projects/migrations/0077_alter_userenvironment_services.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-03-06 12:37 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0076_update_account_permissions'), + ] + + operations = [ + migrations.AlterField( + model_name='userenvironment', + name='services', + field=models.JSONField(blank=True, default=dict, help_text='Dict of http services listening on code server pod, i.e. 
{"django": {"port": 3000, "access": "private", "websockets": "true"}}', null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0078_release_observability_images.py b/src/core/api/app/projects/migrations/0078_release_observability_images.py new file mode 100644 index 00000000..347963ea --- /dev/null +++ b/src/core/api/app/projects/migrations/0078_release_observability_images.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-03-08 20:26 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0077_alter_userenvironment_services'), + ] + + operations = [ + migrations.AddField( + model_name='release', + name='observability_images', + field=models.JSONField(default=list), + ), + ] diff --git a/src/core/api/app/projects/migrations/0079_environment_update_strategy.py b/src/core/api/app/projects/migrations/0079_environment_update_strategy.py new file mode 100644 index 00000000..92cf4b02 --- /dev/null +++ b/src/core/api/app/projects/migrations/0079_environment_update_strategy.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-03-14 17:02 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0078_release_observability_images'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='update_strategy', + field=models.CharField(choices=[('latest', 'Update to latest'), ('freezed', 'Freeze release'), ('minor', 'Update to latest minor patch'), ('major', 'Update to latest major patch')], default='freezed', max_length=10), + ), + ] diff --git a/src/core/api/app/projects/migrations/0080_release_core_images.py b/src/core/api/app/projects/migrations/0080_release_core_images.py new file mode 100644 index 00000000..56a1c9f4 --- /dev/null +++ b/src/core/api/app/projects/migrations/0080_release_core_images.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-03-16 01:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0079_environment_update_strategy'), + ] + + operations = [ + migrations.AddField( + model_name='release', + name='core_images', + field=models.JSONField(default=list), + ), + ] diff --git a/src/core/api/app/projects/migrations/0081_update_account_permissions.py b/src/core/api/app/projects/migrations/0081_update_account_permissions.py new file mode 100644 index 00000000..67745aad --- /dev/null +++ b/src/core/api/app/projects/migrations/0081_update_account_permissions.py @@ -0,0 +1,51 @@ +# Generated by Django 3.2.16 on 2023-02-23 18:35 + +from django.conf import settings +from django.db import migrations + + +def create_permissions(apps, account): + ContentType = apps.get_model("contenttypes", "ContentType") + Permission = apps.get_model("auth", "Permission") + content_type = ContentType.objects.get(app_label="users", model="account") + for resource in settings.ACCOUNT_RESOURCES: + w_name = f"{account.slug}|{resource}|{settings.ACTION_WRITE}" + r_name = f"{account.slug}|{resource}|{settings.ACTION_READ}" + Permission.objects.get_or_create( + name=w_name, + content_type=content_type, + defaults={"codename": w_name[:100]}, + ) + Permission.objects.get_or_create( + name=r_name, + content_type=content_type, + defaults={"codename": r_name[:100]}, + ) + + +def reassign_account_groups_permissions(apps, account): + ExtendedGroup = apps.get_model("users", "ExtendedGroup") + Permission = apps.get_model("auth", "Permission") + ext_group = 
ExtendedGroup.objects.get(account=account, role="account_admin") + if ext_group: + for permission in Permission.objects.filter( + name__startswith=f"{account.slug}|" + ): + ext_group.group.permissions.add(permission) + + +def update_account_permissions(apps, schema_editor): + Account = apps.get_model("users", "Account") + for account in Account.objects.all(): + create_permissions(apps, account) + reassign_account_groups_permissions(apps, account) + + +class Migration(migrations.Migration): + dependencies = [ + ("projects", "0080_release_core_images"), + ] + + operations = [ + migrations.RunPython(update_account_permissions), + ] diff --git a/src/core/api/app/projects/migrations/0082_auto_20230420_1355.py b/src/core/api/app/projects/migrations/0082_auto_20230420_1355.py new file mode 100644 index 00000000..f50616db --- /dev/null +++ b/src/core/api/app/projects/migrations/0082_auto_20230420_1355.py @@ -0,0 +1,33 @@ +# Generated by Django 3.2.16 on 2023-04-20 13:55 + +import autoslug.fields +from django.db import migrations +import projects.models.profile + + +class Migration(migrations.Migration): + dependencies = [ + ("projects", "0081_update_account_permissions"), + ] + + operations = [ + migrations.RemoveConstraint( + model_name="profilefile", + name="Profile file name uniqueness", + ), + migrations.RemoveConstraint( + model_name="profilefile", + name="Profile file mount path uniqueness", + ), + migrations.RemoveField( + model_name="profilefile", + name="name", + ), + migrations.AlterField( + model_name="profilefile", + name="slug", + field=autoslug.fields.AutoSlugField( + editable=False, populate_from=projects.models.profile.profile_file_slug + ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0083_auto_20230425_1153.py b/src/core/api/app/projects/migrations/0083_auto_20230425_1153.py new file mode 100644 index 00000000..760d063d --- /dev/null +++ b/src/core/api/app/projects/migrations/0083_auto_20230425_1153.py @@ -0,0 +1,48 @@ +# Generated by Django 3.2.16 on 2023-04-25 11:53 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0082_auto_20230420_1355'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='release_profile', + field=models.CharField(choices=[('dbt-snowflake', 'dbt-snowflake'), ('dbt-redshift', 'dbt-redshift'), ('dbt-bigquery', 'dbt-bigquery'), ('dbt-databricks', 'dbt-databricks')], default='dbt-snowflake', max_length=50), + ), + migrations.AddField( + model_name='profileimageset', + name='build_airflow', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='profileimageset', + name='build_ci_airflow', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='profileimageset', + name='build_ci_basic', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='profileimageset', + name='build_code_server', + field=models.BooleanField(default=True), + ), + migrations.AddField( + model_name='profileimageset', + name='build_dbt_osmosis', + field=models.BooleanField(default=True), + ), + migrations.AddField( + model_name='profileimageset', + name='ci_requirements', + field=models.JSONField(blank=True, default=list, help_text='List of python libs to be used in ci images, e.g. 
["Django==3.2.6"]', null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0084_alter_repository_url.py b/src/core/api/app/projects/migrations/0084_alter_repository_url.py new file mode 100644 index 00000000..a2029206 --- /dev/null +++ b/src/core/api/app/projects/migrations/0084_alter_repository_url.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-07-31 17:43 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0083_auto_20230425_1153'), + ] + + operations = [ + migrations.AlterField( + model_name='repository', + name='url', + field=models.URLField(blank=True, max_length=250, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0084_auto_20230703_2212.py b/src/core/api/app/projects/migrations/0084_auto_20230703_2212.py new file mode 100644 index 00000000..8b8d565c --- /dev/null +++ b/src/core/api/app/projects/migrations/0084_auto_20230703_2212.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.16 on 2023-07-03 22:12 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0083_auto_20230425_1153'), + ] + + operations = [ + migrations.RenameField( + model_name='profileimageset', + old_name='build_dbt_osmosis', + new_name='build_dbt_core_interface', + ), + migrations.AlterField( + model_name='profile', + name='dbt_sync', + field=models.BooleanField(default=True, help_text='If enabled, dbt core interface gets installed as a requirement of the datacoves power user extension'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0085_release_deprecated.py b/src/core/api/app/projects/migrations/0085_release_deprecated.py new file mode 100644 index 00000000..f6766615 --- /dev/null +++ b/src/core/api/app/projects/migrations/0085_release_deprecated.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-08-02 18:05 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0084_alter_repository_url'), + ] + + operations = [ + migrations.AddField( + model_name='release', + name='deprecated', + field=models.JSONField(default=dict), + ), + ] diff --git a/src/core/api/app/projects/migrations/0086_alter_environment_dbt_home_path.py b/src/core/api/app/projects/migrations/0086_alter_environment_dbt_home_path.py new file mode 100644 index 00000000..fede6979 --- /dev/null +++ b/src/core/api/app/projects/migrations/0086_alter_environment_dbt_home_path.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-08-30 17:29 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0085_release_deprecated'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='dbt_home_path', + field=models.CharField(blank=True, default='', max_length=4096), + ), + ] diff --git a/src/core/api/app/projects/migrations/0087_merge_20230901_1228.py b/src/core/api/app/projects/migrations/0087_merge_20230901_1228.py new file mode 100644 index 00000000..73acdcb5 --- /dev/null +++ b/src/core/api/app/projects/migrations/0087_merge_20230901_1228.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.16 on 2023-09-01 12:28 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0084_auto_20230703_2212'), + ('projects', '0086_alter_environment_dbt_home_path'), + ] + + operations = [ + ] diff --git 
a/src/core/api/app/projects/migrations/0088_auto_20231017_1950.py b/src/core/api/app/projects/migrations/0088_auto_20231017_1950.py new file mode 100644 index 00000000..29e8870e --- /dev/null +++ b/src/core/api/app/projects/migrations/0088_auto_20231017_1950.py @@ -0,0 +1,26 @@ +# Generated by Django 3.2.20 on 2023-10-17 19:50 + +import django.db.models.deletion +from django.conf import settings +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('projects', '0087_merge_20230901_1228'), + ] + + operations = [ + migrations.AddField( + model_name='profile', + name='created_by', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='created_profiles', to=settings.AUTH_USER_MODEL), + ), + migrations.AddField( + model_name='profile', + name='updated_by', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='updated_profiles', to=settings.AUTH_USER_MODEL), + ), + ] diff --git a/src/core/api/app/projects/migrations/0088_auto_20231018_1338.py b/src/core/api/app/projects/migrations/0088_auto_20231018_1338.py new file mode 100644 index 00000000..e82b0b9b --- /dev/null +++ b/src/core/api/app/projects/migrations/0088_auto_20231018_1338.py @@ -0,0 +1,29 @@ +# Generated by Django 3.2.16 on 2023-10-18 13:38 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0087_merge_20230901_1228'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='variables', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AddField( + model_name='project', + name='variables', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AddField( + model_name='userenvironment', + name='variables', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0089_environment_code_server_config.py b/src/core/api/app/projects/migrations/0089_environment_code_server_config.py new file mode 100644 index 00000000..722a071c --- /dev/null +++ b/src/core/api/app/projects/migrations/0089_environment_code_server_config.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.20 on 2023-11-02 23:09 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0088_auto_20231017_1950'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='code_server_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0089_merge_0088_auto_20231017_1950_0088_auto_20231018_1338.py b/src/core/api/app/projects/migrations/0089_merge_0088_auto_20231017_1950_0088_auto_20231018_1338.py new file mode 100644 index 00000000..5a48ba9d --- /dev/null +++ b/src/core/api/app/projects/migrations/0089_merge_0088_auto_20231017_1950_0088_auto_20231018_1338.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.20 on 2023-11-02 12:17 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0088_auto_20231017_1950'), + ('projects', '0088_auto_20231018_1338'), + ] + + operations = [ + ] diff --git 
a/src/core/api/app/projects/migrations/0090_release_profile_flags.py b/src/core/api/app/projects/migrations/0090_release_profile_flags.py new file mode 100644 index 00000000..1d71b221 --- /dev/null +++ b/src/core/api/app/projects/migrations/0090_release_profile_flags.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2023-11-15 15:01 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0089_environment_code_server_config'), + ] + + operations = [ + migrations.AddField( + model_name='release', + name='profile_flags', + field=models.JSONField(default=dict), + ), + ] diff --git a/src/core/api/app/projects/migrations/0091_merge_20231116_2016.py b/src/core/api/app/projects/migrations/0091_merge_20231116_2016.py new file mode 100644 index 00000000..e28ff588 --- /dev/null +++ b/src/core/api/app/projects/migrations/0091_merge_20231116_2016.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.20 on 2023-11-16 20:16 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0089_merge_0088_auto_20231017_1950_0088_auto_20231018_1338'), + ('projects', '0090_release_profile_flags'), + ] + + operations = [ + ] diff --git a/src/core/api/app/projects/migrations/0092_rename_services_userenvironment_exposures.py b/src/core/api/app/projects/migrations/0092_rename_services_userenvironment_exposures.py new file mode 100644 index 00000000..0a1567fc --- /dev/null +++ b/src/core/api/app/projects/migrations/0092_rename_services_userenvironment_exposures.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2023-12-05 19:22 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0091_merge_20231116_2016'), + ] + + operations = [ + migrations.RenameField( + model_name='userenvironment', + old_name='services', + new_name='exposures', + ), + ] diff --git a/src/core/api/app/projects/migrations/0093_userenvironment_services.py b/src/core/api/app/projects/migrations/0093_userenvironment_services.py new file mode 100644 index 00000000..cae50d9c --- /dev/null +++ b/src/core/api/app/projects/migrations/0093_userenvironment_services.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2023-12-05 19:34 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0092_rename_services_userenvironment_exposures'), + ] + + operations = [ + migrations.AddField( + model_name='userenvironment', + name='services', + field=models.JSONField(blank=True, default=dict, help_text='Dict to handle the state of services at user level and unmet preconditions if found.', null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0094_update_dbt_api_permissions.py b/src/core/api/app/projects/migrations/0094_update_dbt_api_permissions.py new file mode 100644 index 00000000..81f65949 --- /dev/null +++ b/src/core/api/app/projects/migrations/0094_update_dbt_api_permissions.py @@ -0,0 +1,32 @@ +# Generated by Django 3.2.16 on 2024-01-12 18:35 + +from django.conf import settings +from django.db import migrations + + +def update_or_create_dbt_api_permissions(apps, schema_editor): + Environment = apps.get_model("projects", "Environment") + Permission = apps.get_model("auth", "Permission") + ContentType = apps.get_model("contenttypes", "ContentType") + content_type = ContentType.objects.get(app_label="users", model="account") + + for env in Environment.objects.all(): + for resource in 
settings.DBT_API_RESOURCES: + name = resource.format( + cluster_domain=env.cluster.domain, env_slug=env.slug + ) + Permission.objects.get_or_create( + name=name, + content_type=content_type, + defaults={"codename": name[:100]}, + ) + + +class Migration(migrations.Migration): + dependencies = [ + ("projects", "0093_userenvironment_services"), + ] + + operations = [ + migrations.RunPython(update_or_create_dbt_api_permissions), + ] diff --git a/src/core/api/app/projects/migrations/0095_alter_profilefile_override_existent.py b/src/core/api/app/projects/migrations/0095_alter_profilefile_override_existent.py new file mode 100644 index 00000000..c99dc81e --- /dev/null +++ b/src/core/api/app/projects/migrations/0095_alter_profilefile_override_existent.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2024-03-04 15:22 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0094_update_dbt_api_permissions'), + ] + + operations = [ + migrations.AlterField( + model_name='profilefile', + name='override_existent', + field=models.BooleanField(default=True, help_text='When enabled, if a file is found, it will be overwritten.'), + ), + ] diff --git a/src/core/api/app/projects/migrations/0096_release_promtail_chart.py b/src/core/api/app/projects/migrations/0096_release_promtail_chart.py new file mode 100644 index 00000000..969a98ea --- /dev/null +++ b/src/core/api/app/projects/migrations/0096_release_promtail_chart.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2024-03-06 22:28 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0095_alter_profilefile_override_existent'), + ] + + operations = [ + migrations.AddField( + model_name='release', + name='promtail_chart', + field=models.JSONField(default=dict), + ), + ] diff --git a/src/core/api/app/projects/migrations/0097_alter_environment_dbt_profiles_dir.py b/src/core/api/app/projects/migrations/0097_alter_environment_dbt_profiles_dir.py new file mode 100644 index 00000000..e2dba456 --- /dev/null +++ b/src/core/api/app/projects/migrations/0097_alter_environment_dbt_profiles_dir.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2024-04-03 14:15 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0096_release_promtail_chart'), + ] + + operations = [ + migrations.AlterField( + model_name='environment', + name='dbt_profiles_dir', + field=models.CharField(blank=True, default='automate/dbt', max_length=4096), + ), + ] diff --git a/src/core/api/app/projects/migrations/0097_auto_20240408_1710.py b/src/core/api/app/projects/migrations/0097_auto_20240408_1710.py new file mode 100644 index 00000000..5316b1ba --- /dev/null +++ b/src/core/api/app/projects/migrations/0097_auto_20240408_1710.py @@ -0,0 +1,29 @@ +# Generated by Django 3.2.20 on 2024-04-08 17:10 + +import core.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0096_release_promtail_chart'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='grafana_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AlterField( + model_name='environmentintegration', + name='service', + field=models.CharField(choices=[('airbyte', 'Airbyte'), ('airflow', 'Airflow'), ('code-server', 'Code-Server'), ('dbt-docs', 'Dbt-Docs'), 
('grafana', 'Grafana'), ('local-dbt-docs', 'Local-Dbt-Docs'), ('superset', 'Superset')], max_length=50), + ), + migrations.AlterField( + model_name='servicecredential', + name='service', + field=models.CharField(choices=[('airbyte', 'Airbyte'), ('airflow', 'Airflow'), ('code-server', 'Code-Server'), ('dbt-docs', 'Dbt-Docs'), ('grafana', 'Grafana'), ('local-dbt-docs', 'Local-Dbt-Docs'), ('superset', 'Superset')], max_length=50), + ), + ] diff --git a/src/core/api/app/projects/migrations/0098_merge_20240409_1649.py b/src/core/api/app/projects/migrations/0098_merge_20240409_1649.py new file mode 100644 index 00000000..a276218d --- /dev/null +++ b/src/core/api/app/projects/migrations/0098_merge_20240409_1649.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.20 on 2024-04-09 16:49 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0097_alter_environment_dbt_profiles_dir'), + ('projects', '0097_auto_20240408_1710'), + ] + + operations = [ + ] diff --git a/src/core/api/app/projects/migrations/0099_project_release_branch_protected.py b/src/core/api/app/projects/migrations/0099_project_release_branch_protected.py new file mode 100644 index 00000000..f195a65d --- /dev/null +++ b/src/core/api/app/projects/migrations/0099_project_release_branch_protected.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2024-05-07 13:53 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0098_merge_20240409_1649"), + ] + + operations = [ + migrations.AddField( + model_name="project", + name="release_branch_protected", + field=models.BooleanField(default=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0099_userenvironment_code_server_local_airflow_active.py b/src/core/api/app/projects/migrations/0099_userenvironment_code_server_local_airflow_active.py new file mode 100644 index 00000000..9699dc8c --- /dev/null +++ b/src/core/api/app/projects/migrations/0099_userenvironment_code_server_local_airflow_active.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2024-05-06 20:06 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0098_merge_20240409_1649'), + ] + + operations = [ + migrations.AddField( + model_name='userenvironment', + name='code_server_local_airflow_active', + field=models.BooleanField(default=False), + ), + ] diff --git a/src/core/api/app/projects/migrations/0100_merge_20240513_2100.py b/src/core/api/app/projects/migrations/0100_merge_20240513_2100.py new file mode 100644 index 00000000..936e6ac7 --- /dev/null +++ b/src/core/api/app/projects/migrations/0100_merge_20240513_2100.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.20 on 2024-05-13 21:00 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0099_project_release_branch_protected'), + ('projects', '0099_userenvironment_code_server_local_airflow_active'), + ] + + operations = [ + ] diff --git a/src/core/api/app/projects/migrations/0101_userenvironment_local_airflow_config.py b/src/core/api/app/projects/migrations/0101_userenvironment_local_airflow_config.py new file mode 100644 index 00000000..9cd77869 --- /dev/null +++ b/src/core/api/app/projects/migrations/0101_userenvironment_local_airflow_config.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.20 on 2024-06-03 16:44 + +import core.fields +from django.db import migrations + + +class 
Migration(migrations.Migration): + + dependencies = [ + ('projects', '0100_merge_20240513_2100'), + ] + + operations = [ + migrations.AddField( + model_name='userenvironment', + name='local_airflow_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0102_auto_20240612_0211.py b/src/core/api/app/projects/migrations/0102_auto_20240612_0211.py new file mode 100644 index 00000000..bb67630e --- /dev/null +++ b/src/core/api/app/projects/migrations/0102_auto_20240612_0211.py @@ -0,0 +1,89 @@ +# Generated by Django 3.2.20 on 2024-06-12 02:11 + +import core.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0101_userenvironment_local_airflow_config'), + ] + + operations = [ + migrations.AddField( + model_name='environment', + name='datahub_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AddField( + model_name='environment', + name='elastic_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AddField( + model_name='environment', + name='kafka_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AddField( + model_name='environment', + name='neo4j_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AddField( + model_name='environment', + name='postgresql_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, null=True), + ), + migrations.AddField( + model_name='release', + name='datahub_chart', + field=models.JSONField(default=dict), + ), + migrations.AddField( + model_name='release', + name='datahub_images', + field=models.JSONField(default=list), + ), + migrations.AddField( + model_name='release', + name='elastic_chart', + field=models.JSONField(default=dict), + ), + migrations.AddField( + model_name='release', + name='elastic_images', + field=models.JSONField(default=list), + ), + migrations.AddField( + model_name='release', + name='kafka_chart', + field=models.JSONField(default=dict), + ), + migrations.AddField( + model_name='release', + name='kafka_images', + field=models.JSONField(default=list), + ), + migrations.AddField( + model_name='release', + name='neo4j_chart', + field=models.JSONField(default=dict), + ), + migrations.AddField( + model_name='release', + name='neo4j_images', + field=models.JSONField(default=list), + ), + migrations.AddField( + model_name='release', + name='postgresql_chart', + field=models.JSONField(default=dict), + ), + migrations.AddField( + model_name='release', + name='postgresql_images', + field=models.JSONField(default=list), + ), + ] diff --git a/src/core/api/app/projects/migrations/0102_userenvironment_code_server_config.py b/src/core/api/app/projects/migrations/0102_userenvironment_code_server_config.py new file mode 100644 index 00000000..bf2bec11 --- /dev/null +++ b/src/core/api/app/projects/migrations/0102_userenvironment_code_server_config.py @@ -0,0 +1,25 @@ +# Generated by Django 3.2.20 on 2024-06-14 13:33 + +import core.fields +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0101_userenvironment_local_airflow_config"), + ] + + operations = [ + migrations.AddField( + model_name="userenvironment", + 
name="code_server_config", + field=core.fields.EncryptedJSONField( + blank=True, + default=dict, + editable=True, + help_text="Extra configuration for user's code-server", + null=True, + ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0103_auto_20240612_0228.py b/src/core/api/app/projects/migrations/0103_auto_20240612_0228.py new file mode 100644 index 00000000..dbbf0780 --- /dev/null +++ b/src/core/api/app/projects/migrations/0103_auto_20240612_0228.py @@ -0,0 +1,155 @@ +# Generated by Django 3.2.20 on 2024-06-12 02:28 + +from django.conf import settings +from django.db import migrations + + +def default_services(): + return { + settings.SERVICE_AIRBYTE: {"enabled": False, "valid": True}, + settings.SERVICE_AIRFLOW: {"enabled": False, "valid": True}, + settings.SERVICE_CODE_SERVER: { + "enabled": False, + "valid": True, + }, + settings.SERVICE_DBT_DOCS: {"enabled": False, "valid": True}, + settings.SERVICE_SUPERSET: {"enabled": False, "valid": True}, + settings.SERVICE_DATAHUB: {"enabled": False, "valid": True}, + } + + +def default_internal_services(): + return { + settings.INTERNAL_SERVICE_MINIO: {"enabled": False}, + settings.INTERNAL_SERVICE_ELASTIC: {"enabled": False}, + settings.INTERNAL_SERVICE_NEO4J: {"enabled": False}, + settings.INTERNAL_SERVICE_POSTGRESQL: {"enabled": False}, + settings.INTERNAL_SERVICE_KAFKA: {"enabled": False}, + } + + + +def make_permission_name( + resource, + action, + scope=None, + account_slug=None, + project_slug=None, + environment_slug=None, +): + if scope is None: + if account_slug is None and project_slug is None and environment_slug is None: + return f"{resource}|{action}" + scope = [] + if account_slug: + scope.append(account_slug) + if project_slug: + scope.append(project_slug) + if environment_slug: + scope.append(environment_slug) + scope = ":".join(scope) + return f"{scope}|{resource}|{action}" + +def create_permissions(apps, account_slug, project_slug, environment_slug=None): + Account = apps.get_model("users", "Account") + Permission = apps.get_model("auth", "Permission") + ContentType = apps.get_model("contenttypes", "ContentType") + content_type = ContentType.objects.get_for_model(Account) + for resource in settings.WORKBENCH_RESOURCES: + for action in (settings.ACTION_READ, settings.ACTION_WRITE): + name = make_permission_name( + resource, + action, + account_slug=account_slug, + project_slug=project_slug, + environment_slug=environment_slug, + ) + + Permission.objects.get_or_create( + name=name, + content_type=content_type, + defaults={"codename": name[:100]}, + ) + + +def reassign_environment_permissions(apps, environment): + ExtendedGroup = apps.get_model("users", "ExtendedGroup") + Permission = apps.get_model("auth", "Permission") + + ext_group = ExtendedGroup.objects.filter(environment=environment, role="environment_viewer").first() + if ext_group: + for permission in Permission.objects.filter( + name__endswith=f"{environment.slug}|{settings.SERVICE_DATAHUB_DATA}|{settings.ACTION_READ}" + ): + ext_group.group.permissions.add(permission) + + ext_group = ExtendedGroup.objects.filter(environment=environment, role="environment_developer").first() + if ext_group: + for permission in Permission.objects.filter( + name__endswith=f"{environment.slug}|{settings.SERVICE_DATAHUB_DATA}|{settings.ACTION_WRITE}" + ): + ext_group.group.permissions.add(permission) + + ext_group = ExtendedGroup.objects.filter(environment=environment, role="environment_sysadmin").first() + if ext_group: + for permission in Permission.objects.filter( + 
name__endswith=f"{environment.slug}|{settings.SERVICE_DATAHUB_ADMIN}|{settings.ACTION_WRITE}" + ): + ext_group.group.permissions.add(permission) + + +def reassign_project_permissions(apps, project): + ExtendedGroup = apps.get_model("users", "ExtendedGroup") + Permission = apps.get_model("auth", "Permission") + + ext_group = ExtendedGroup.objects.filter(project=project, role="project_viewer").first() + if ext_group: + for permission in Permission.objects.filter( + name__endswith=f"{project.slug}|{settings.SERVICE_DATAHUB_DATA}|{settings.ACTION_READ}" + ): + ext_group.group.permissions.add(permission) + + ext_group = ExtendedGroup.objects.filter(project=project, role="project_developer").first() + if ext_group: + for permission in Permission.objects.filter( + name__endswith=f"{project.slug}|{settings.SERVICE_DATAHUB_DATA}|{settings.ACTION_WRITE}" + ): + ext_group.group.permissions.add(permission) + + ext_group = ExtendedGroup.objects.filter(project=project, role="project_sysadmin").first() + if ext_group: + for permission in Permission.objects.filter( + name__endswith=f"{project.slug}|{settings.SERVICE_DATAHUB_ADMIN}|{settings.ACTION_WRITE}" + ): + ext_group.group.permissions.add(permission) + + +def add_missing_default_services_to_environments(apps, schema_editor): + Environment = apps.get_model("projects", "Environment") + Project = apps.get_model("projects", "Project") + + for env in Environment.objects.all(): + services = default_services() + services.update(env.services) + internal_services = default_internal_services() + internal_services.update(env.internal_services) + env.services = services + env.internal_services = internal_services + env.save() + create_permissions(apps, env.project.account.slug, env.project.slug, environment_slug=env.slug) + reassign_environment_permissions(apps, env) + + for project in Project.objects.all(): + create_permissions(apps, project.account.slug, project.slug) + reassign_project_permissions(apps, project) + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0102_auto_20240612_0211'), + ] + + operations = [ + migrations.RunPython(add_missing_default_services_to_environments), + ] diff --git a/src/core/api/app/projects/migrations/0104_merge_20240626_1552.py b/src/core/api/app/projects/migrations/0104_merge_20240626_1552.py new file mode 100644 index 00000000..8032ce68 --- /dev/null +++ b/src/core/api/app/projects/migrations/0104_merge_20240626_1552.py @@ -0,0 +1,14 @@ +# Generated by Django 3.2.20 on 2024-06-26 15:52 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0102_userenvironment_code_server_config'), + ('projects', '0103_auto_20240612_0228'), + ] + + operations = [ + ] diff --git a/src/core/api/app/projects/migrations/0105_auto_20240701_2118.py b/src/core/api/app/projects/migrations/0105_auto_20240701_2118.py new file mode 100644 index 00000000..395eeebd --- /dev/null +++ b/src/core/api/app/projects/migrations/0105_auto_20240701_2118.py @@ -0,0 +1,462 @@ +# Generated by Django 3.2.20 on 2024-07-01 21:18 + +import autoslug.fields +import core.fields +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion +import projects.models.connection +import projects.models.environment + + +class Migration(migrations.Migration): + + dependencies = [ + migrations.swappable_dependency(settings.AUTH_USER_MODEL), + ('codegen', '0022_auto_20240701_2118'), + ('users', '0021_auto_20240701_2118'), + ('projects', 
'0104_merge_20240626_1552'), + ] + + operations = [ + migrations.AlterField( + model_name='blockedpodcreationrequest', + name='request', + field=models.JSONField(blank=True, default=dict, help_text='Request received by webhook, as JSON dictionary', null=True), + ), + migrations.AlterField( + model_name='blockedpodcreationrequest', + name='response', + field=models.JSONField(blank=True, default=dict, help_text='Response sent, as JSON dictionary', null=True), + ), + migrations.AlterField( + model_name='connectiontemplate', + name='connection_details', + field=core.fields.EncryptedJSONField(default=dict, editable=True, help_text='This maps the keys needed (which are defined in ConnectionType.required_fieldsets) to default values which may be overriden by ServiceCredential or UserCredential.'), + ), + migrations.AlterField( + model_name='connectiontemplate', + name='connection_user', + field=models.CharField(choices=[('provided', 'User provided'), ('email_username', "Inferred from email's username"), ('template', 'Inferred from user info using a custom template')], default='provided', help_text='Can users configure their own credentials or are they restricted? See the Model class documentation for full details.', max_length=20), + ), + migrations.AlterField( + model_name='connectiontemplate', + name='connection_user_template', + field=models.ForeignKey(blank=True, help_text='Only used for custom templates connection user.', null=True, on_delete=django.db.models.deletion.SET_NULL, to='codegen.template'), + ), + migrations.AlterField( + model_name='connectiontemplate', + name='for_users', + field=models.BooleanField(default=True, help_text='Can users set this credential up for themselves?'), + ), + migrations.AlterField( + model_name='connectiontype', + name='account', + field=models.ForeignKey(blank=True, help_text='If null, this is a system level connection type.', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='connection_types', to='users.account'), + ), + migrations.AlterField( + model_name='connectiontype', + name='required_fieldsets', + field=models.JSONField(blank=True, default=list, help_text='list of lists: [["user", "password", "account"], ["user", "token", "account"]]', null=True), + ), + migrations.AlterField( + model_name='connectiontype', + name='slug', + field=autoslug.fields.AutoSlugField(editable=False, help_text='The slug is used as the type, unlike many models where slug and type are different fields.', populate_from=projects.models.connection.connectiontype_slug, unique=True), + ), + migrations.AlterField( + model_name='environment', + name='airbyte_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Airbyte-specfic configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='airflow_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Airflow-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='code_server_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Code Server-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='datahub_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of DataHub-specific configuration items.', null=True), + ), + 
migrations.AlterField( + model_name='environment', + name='dbt_docs_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of DBT Doc-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='docker_config', + field=core.fields.EncryptedJSONField(blank=True, default=projects.models.environment.default_docker_config, editable=True, help_text='An empty docker_config means core-api is not responsible for creating the secret, another system creates the secret named docker_config_secret_name.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='docker_registry', + field=models.CharField(blank=True, help_text='If not provided, this defaults to dockerhub.', max_length=253), + ), + migrations.AlterField( + model_name='environment', + name='elastic_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Elastic-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='grafana_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Grafana-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='internal_services', + field=models.JSONField(default=projects.models.environment.default_internal_services, help_text="Enable or disable certain internal services. This is a dictionary that maps service names to dictionaries that have configuration for each service; each has a configuration key 'enabled' which may be true or false."), + ), + migrations.AlterField( + model_name='environment', + name='kafka_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Kafka-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='minio_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Minio-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='neo4j_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Neo4J-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='pomerium_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Pomerium-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='postgresql_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of PostgreSQL-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='profile', + field=models.ForeignKey(default=projects.models.environment.default_profile, help_text='Profiles control files that are automatically generated for the environment and some credential items. 
They are also the linkage to Profile Image Sets which can control what images and python libraries are available to an environment.', on_delete=django.db.models.deletion.CASCADE, related_name='environments', to='projects.profile'), + ), + migrations.AlterField( + model_name='environment', + name='quotas', + field=models.JSONField(blank=True, default=dict, help_text='Quota configuration dictionary. This overrides whatever is set on the plan level. See the Plan model documentation for more details about how quotas work.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='release_profile', + field=models.CharField(choices=[('dbt-snowflake', 'dbt-snowflake'), ('dbt-redshift', 'dbt-redshift'), ('dbt-bigquery', 'dbt-bigquery'), ('dbt-databricks', 'dbt-databricks')], default='dbt-snowflake', help_text='We have different docker images for different backends; the release profile selects which set of docker images are used.', max_length=50), + ), + migrations.AlterField( + model_name='environment', + name='services', + field=models.JSONField(default=projects.models.environment.default_services, help_text='A map of services. The keys are the names of enabled services. Values are dictionaries, currently empty. May be used in the future to specify that a service is paused due to an expired trial, etc. For most configuration, though, think first of adding fields to Environment and Workspace spec.'), + ), + migrations.AlterField( + model_name='environment', + name='settings', + field=models.JSONField(blank=True, default=dict, help_text='A dictionary of general Environment settings.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='superset_config', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='A dictionary of Superset-specific configuration items.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='sync', + field=models.BooleanField(default=False, help_text="Does the environment need to be sync'd? This will set up the environment and start pods up as needed."), + ), + migrations.AlterField( + model_name='environment', + name='update_strategy', + field=models.CharField(choices=[('latest', 'Update to latest'), ('freezed', 'Freeze release'), ('minor', 'Update to latest minor patch'), ('major', 'Update to latest major patch')], default='freezed', help_text='How will system updates be applied to this environment.', max_length=10), + ), + migrations.AlterField( + model_name='environment', + name='variables', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='Dictionary of environment variables to provide to the pods; these are key-value pairs.', null=True), + ), + migrations.AlterField( + model_name='environment', + name='workspace_generation', + field=models.IntegerField(help_text="The last workspace's (kubernetes resource) generation we wrote.", null=True), + ), + migrations.AlterField( + model_name='profile', + name='created_by', + field=models.ForeignKey(blank=True, help_text='If created_by is null, it is a system profile', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='created_profiles', to=settings.AUTH_USER_MODEL), + ), + migrations.AlterField( + model_name='profilefile', + name='execute', + field=models.BooleanField(default=False, help_text='Specifies if file should be executed, requires shebang set on file. 
If this is set, override_existent will be forced to True.'), + ), + migrations.AlterField( + model_name='profilefile', + name='mount_path', + field=models.CharField(help_text='Path for the file', max_length=250), + ), + migrations.AlterField( + model_name='profileimageset', + name='build_airflow', + field=models.BooleanField(default=False, help_text='If True, the build_profile_image_set task will build this docker image using requirements specified in the profile image set.'), + ), + migrations.AlterField( + model_name='profileimageset', + name='build_ci_airflow', + field=models.BooleanField(default=False, help_text='If True, the build_profile_image_set task will build this docker image using requirements specified in the profile image set.'), + ), + migrations.AlterField( + model_name='profileimageset', + name='build_ci_basic', + field=models.BooleanField(default=False, help_text='If True, the build_profile_image_set task will build this docker image using requirements specified in the profile image set.'), + ), + migrations.AlterField( + model_name='profileimageset', + name='build_code_server', + field=models.BooleanField(default=True, help_text='If True, the build_profile_image_set task will build this docker image using requirements specified in the profile image set.'), + ), + migrations.AlterField( + model_name='profileimageset', + name='build_dbt_core_interface', + field=models.BooleanField(default=True, help_text='If True, the build_profile_image_set task will build this docker image using requirements specified in the profile image set.'), + ), + migrations.AlterField( + model_name='project', + name='ci_home_url', + field=models.URLField(blank=True, help_text='Base URL for CI, if CI is being used.', max_length=250, null=True), + ), + migrations.AlterField( + model_name='project', + name='deploy_credentials', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text="Required for HTTP clone stategy. This will be a JSON dictionary with keys 'git_username' and 'git_password'", null=True), + ), + migrations.AlterField( + model_name='project', + name='deploy_key', + field=models.ForeignKey(blank=True, help_text='Required for SSH clone strategy', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='projects', to='projects.sshkey'), + ), + migrations.AlterField( + model_name='project', + name='release_branch', + field=models.CharField(default='main', help_text='Which branch is used for releases in the GIT repository', max_length=130), + ), + migrations.AlterField( + model_name='project', + name='repository', + field=models.ForeignKey(help_text='GIT Repository to use for this project', on_delete=django.db.models.deletion.CASCADE, to='projects.repository'), + ), + migrations.AlterField( + model_name='project', + name='validated_at', + field=models.DateTimeField(blank=True, help_text='Projects must be validated for services to run. 
This is usually set by the system.', null=True), + ), + migrations.AlterField( + model_name='project', + name='variables', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='Environment variables used across the entire project', null=True), + ), + migrations.AlterField( + model_name='release', + name='airbyte_chart', + field=models.JSONField(default=dict, help_text='Helm Chart details for this service'), + ), + migrations.AlterField( + model_name='release', + name='airbyte_images', + field=models.JSONField(default=list, help_text='A list of docker image names and tags required by `images`'), + ), + migrations.AlterField( + model_name='release', + name='airflow_chart', + field=models.JSONField(default=dict, help_text='Helm Chart details for this service'), + ), + migrations.AlterField( + model_name='release', + name='airflow_images', + field=models.JSONField(default=list, help_text='A list of docker image names and tags required by `images`'), + ), + migrations.AlterField( + model_name='release', + name='ci_images', + field=models.JSONField(default=list, help_text='Dictionary mapping image names to tags for CI images'), + ), + migrations.AlterField( + model_name='release', + name='code_server_extensions', + field=models.JSONField(default=dict, help_text='Dictionary of VS Code extension names to versions which will be installed on a code server by default, unless overridden by a profile image set.'), + ), + migrations.AlterField( + model_name='release', + name='code_server_libraries', + field=models.JSONField(default=dict, help_text='Dictionary of python library names to versions which will be installed on code server by default, unless overridden by a profile image set.'), + ), + migrations.AlterField( + model_name='release', + name='commit', + field=models.CharField(help_text='GIT Commit Hash', max_length=100), + ), + migrations.AlterField( + model_name='release', + name='core_images', + field=models.JSONField(default=list, help_text='A list of docker image names and tags required by `images`'), + ), + migrations.AlterField( + model_name='release', + name='datahub_chart', + field=models.JSONField(default=dict, help_text='Helm Chart details for this service'), + ), + migrations.AlterField( + model_name='release', + name='datahub_images', + field=models.JSONField(default=list, help_text='A list of docker image names and tags required by `images`'), + ), + migrations.AlterField( + model_name='release', + name='deprecated', + field=models.JSONField(default=dict, help_text="Dictionary mapping 'charts' and 'deployments' to list of deprecated images."), + ), + migrations.AlterField( + model_name='release', + name='elastic_chart', + field=models.JSONField(default=dict, help_text='Helm Chart details for this service'), + ), + migrations.AlterField( + model_name='release', + name='elastic_images', + field=models.JSONField(default=list, help_text='A list of docker image names and tags required by `images`'), + ), + migrations.AlterField( + model_name='release', + name='images', + field=models.JSONField(default=dict, help_text='A dictionary mapping docker image names to tags (versions).'), + ), + migrations.AlterField( + model_name='release', + name='kafka_chart', + field=models.JSONField(default=dict, help_text='Helm Chart details for this service'), + ), + migrations.AlterField( + model_name='release', + name='kafka_images', + field=models.JSONField(default=list, help_text='A list of docker image names and tags required by `images`'), + ), + 
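# Illustrative aside (hypothetical values, not taken from an actual Release
# row): each "*_chart" JSONField is expected to hold Helm chart coordinates for
# that service, and each "*_images" JSONField a list of docker image name:tag
# strings, e.g. roughly:
#   airflow_chart  = {"name": "airflow", "repository": "https://example.invalid/charts", "version": "1.2.3"}
#   airflow_images = ["datacoves/airflow:2.7.0", "datacoves/airflow-worker:2.7.0"]
# The exact keys and tags are supplied by the release tooling, not by this
# migration.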
migrations.AlterField( + model_name='release', + name='minio_chart', + field=models.JSONField(default=dict, help_text='Helm Chart details for this service'), + ), + migrations.AlterField( + model_name='release', + name='neo4j_chart', + field=models.JSONField(default=dict, help_text='Helm Chart details for this service'), + ), + migrations.AlterField( + model_name='release', + name='neo4j_images', + field=models.JSONField(default=list, help_text='A list of docker image names and tags required by `images`'), + ), + migrations.AlterField( + model_name='release', + name='notes', + field=models.TextField(blank=True, help_text='Release notes', null=True), + ), + migrations.AlterField( + model_name='release', + name='observability_images', + field=models.JSONField(default=list, help_text='A list of docker image names and tags required by `images`'), + ), + migrations.AlterField( + model_name='release', + name='postgresql_chart', + field=models.JSONField(default=dict, help_text='Helm Chart details for this service'), + ), + migrations.AlterField( + model_name='release', + name='postgresql_images', + field=models.JSONField(default=list, help_text='A list of docker image names and tags required by `images`'), + ), + migrations.AlterField( + model_name='release', + name='profile_flags', + field=models.JSONField(default=dict, help_text='Dictionary mapping environment profiles to dictionaries of flags.'), + ), + migrations.AlterField( + model_name='release', + name='promtail_chart', + field=models.JSONField(default=dict, help_text='Helm Chart details for this service'), + ), + migrations.AlterField( + model_name='release', + name='superset_chart', + field=models.JSONField(default=dict, help_text='Helm Chart details for this service'), + ), + migrations.AlterField( + model_name='release', + name='superset_images', + field=models.JSONField(default=list, help_text='A list of docker image names and tags required by `images`'), + ), + migrations.AlterField( + model_name='repository', + name='git_url', + field=models.CharField(help_text='This may be a URL, or a ssh path such as: git@github.com:GROUP/REPO.git ... it will be forced to lower case on save.', max_length=250, unique=True), + ), + migrations.AlterField( + model_name='repository', + name='url', + field=models.URLField(blank=True, help_text='This only supports a URL and is optional.', max_length=250, null=True), + ), + migrations.AlterField( + model_name='servicecredential', + name='connection_overrides', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='These override settings in the ConnectionTemplate; the keys that should be set between a ServiceCredential and a ConnectionTemplate are defined in ConnectionType. This relationship is fully described in the ConnectionTemplate documentation.', null=True), + ), + migrations.AlterField( + model_name='servicecredential', + name='ssl_key', + field=models.ForeignKey(blank=True, help_text='SSL key to use, if necessary for the connection type.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='service_credentials', to='projects.sslkey'), + ), + migrations.AlterField( + model_name='servicecredential', + name='validated_at', + field=models.DateTimeField(blank=True, help_text='Credentials must be validated before we use them. 
This should normally be set by the system.', null=True), + ), + migrations.AlterField( + model_name='usercredential', + name='connection_overrides', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='These override settings in the ConnectionTemplate; the keys that should be set between a UserCredential and a ConnectionTemplate are defined in ConnectionType. This relationship is fully described in the ConnectionTemplate documentation.', null=True), + ), + migrations.AlterField( + model_name='usercredential', + name='ssl_key', + field=models.ForeignKey(blank=True, help_text='The SSL key to use, if needed for this credential.', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='user_credentials', to='projects.sslkey'), + ), + migrations.AlterField( + model_name='usercredential', + name='used_on', + field=models.JSONField(default=projects.models.connection.default_user_credential_usages, help_text='JSON list of strings, which are the services that use this credential.'), + ), + migrations.AlterField( + model_name='usercredential', + name='validated_at', + field=models.DateTimeField(blank=True, help_text='Only validated credentials will be used. This is usually set by the system once we have verified the credential works.', null=True), + ), + migrations.AlterField( + model_name='userenvironment', + name='code_server_share_code', + field=models.CharField(blank=True, help_text="This is automatically generated to be a random value on save() if code_server_access isn't ACCESS_PRIVATE", max_length=10, null=True, unique=True), + ), + migrations.AlterField( + model_name='userenvironment', + name='variables', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text='Dictionary of key-value pairs for environment variables', null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0106_alter_connectiontemplate_connection_user_and_more.py b/src/core/api/app/projects/migrations/0106_alter_connectiontemplate_connection_user_and_more.py new file mode 100644 index 00000000..ae877f32 --- /dev/null +++ b/src/core/api/app/projects/migrations/0106_alter_connectiontemplate_connection_user_and_more.py @@ -0,0 +1,69 @@ +# Generated by Django 5.0.7 on 2024-08-12 16:06 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0105_auto_20240701_2118"), + ] + + operations = [ + migrations.AlterField( + model_name="connectiontemplate", + name="connection_user", + field=models.CharField( + choices=[ + ("provided", "User provided"), + ("email_username", "Inferred from email's username"), + ("template", "Inferred from user info using a custom template"), + ("email", "Inferred from email address"), + ("email_uppercase", "Inferred from uppercase email address"), + ], + default="provided", + help_text="Can users configure their own credentials or are they restricted? See the Model class documentation for full details.", + max_length=20, + ), + ), + migrations.AlterField( + model_name="profileimageset", + name="airflow_requirements", + field=models.JSONField( + blank=True, + default=list, + help_text='List of python libs to be used in airflow images, e.g. ["Django==5.0.7"]', + null=True, + ), + ), + migrations.AlterField( + model_name="profileimageset", + name="ci_requirements", + field=models.JSONField( + blank=True, + default=list, + help_text='List of python libs to be used in ci images, e.g. 
["Django==5.0.7"]', + null=True, + ), + ), + migrations.AlterField( + model_name="profileimageset", + name="code_server_requirements", + field=models.JSONField( + blank=True, + default=list, + help_text='List of python libs to be used in code server images, e.g. ["Django==5.0.7"]', + null=True, + ), + ), + migrations.AlterField( + model_name="profileimageset", + name="python_requirements", + field=models.JSONField( + blank=True, + default=list, + help_text='List of python libs to be used in both airflow and code server images, e.g. ["Django==5.0.7"]', + null=True, + ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0107_profileimageset_images_logs.py b/src/core/api/app/projects/migrations/0107_profileimageset_images_logs.py new file mode 100644 index 00000000..6146ef38 --- /dev/null +++ b/src/core/api/app/projects/migrations/0107_profileimageset_images_logs.py @@ -0,0 +1,19 @@ +# Generated by Django 5.0.7 on 2024-09-03 23:06 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("projects", "0106_alter_connectiontemplate_connection_user_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="profileimageset", + name="images_logs", + field=models.JSONField( + blank=True, default=dict, help_text="Kubernetes logs", null=True + ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0107_project_secrets_backend_and_more.py b/src/core/api/app/projects/migrations/0107_project_secrets_backend_and_more.py new file mode 100644 index 00000000..e7d7040d --- /dev/null +++ b/src/core/api/app/projects/migrations/0107_project_secrets_backend_and_more.py @@ -0,0 +1,38 @@ +# Generated by Django 5.0.7 on 2024-09-02 12:56 + +import core.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0106_alter_connectiontemplate_connection_user_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="project", + name="secrets_backend", + field=models.CharField( + choices=[ + ("datacoves", "Datacoves"), + ("aws_secrets_manager", "AWS Secrets Manager"), + ], + default="datacoves", + help_text="Secrets backend used to store/read secrets managed via admin.", + max_length=50, + ), + ), + migrations.AddField( + model_name="project", + name="secrets_backend_config", + field=core.fields.EncryptedJSONField( + blank=True, + default=dict, + editable=True, + help_text="Configuration needed to connect to chosen secrets backend", + null=True, + ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0108_merge_20240905_1238.py b/src/core/api/app/projects/migrations/0108_merge_20240905_1238.py new file mode 100644 index 00000000..65e043d9 --- /dev/null +++ b/src/core/api/app/projects/migrations/0108_merge_20240905_1238.py @@ -0,0 +1,13 @@ +# Generated by Django 5.0.7 on 2024-09-05 12:38 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0107_profileimageset_images_logs"), + ("projects", "0107_project_secrets_backend_and_more"), + ] + + operations = [] diff --git a/src/core/api/app/projects/migrations/0109_project_azure_deploy_key_alter_project_ci_provider_and_more.py b/src/core/api/app/projects/migrations/0109_project_azure_deploy_key_alter_project_ci_provider_and_more.py new file mode 100644 index 00000000..6e93118f --- /dev/null +++ b/src/core/api/app/projects/migrations/0109_project_azure_deploy_key_alter_project_ci_provider_and_more.py @@ -0,0 +1,35 @@ +# Generated by Django 5.0.7 
on 2024-09-19 22:32 + +import core.fields +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0108_merge_20240905_1238'), + ] + + operations = [ + migrations.AddField( + model_name='project', + name='azure_deploy_key', + field=models.ForeignKey(blank=True, help_text='Required for Azure certificate clone strategy', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='projects', to='projects.sslkey'), + ), + migrations.AlterField( + model_name='project', + name='ci_provider', + field=models.CharField(blank=True, choices=[('github', 'GitHub'), ('gitlab', 'Gitlab'), ('bamboo', 'Bamboo'), ('jenkins', 'Jenkins'), ('circleci', 'CircleCI'), ('other', 'Other'), ('azure_devops', 'Azure DevOps')], max_length=50, null=True), + ), + migrations.AlterField( + model_name='project', + name='clone_strategy', + field=models.CharField(choices=[('ssh_clone', 'SSH git clone'), ('http_clone', 'HTTP git clone'), ('azure_secret_clone', 'Azure Secret clone'), ('azure_certificate_clone', 'Azure Certificate clone')], default='ssh_clone', max_length=60), + ), + migrations.AlterField( + model_name='project', + name='deploy_credentials', + field=core.fields.EncryptedJSONField(blank=True, default=dict, editable=True, help_text="Required for HTTP clone stategy. This will be a JSON dictionary with keys 'git_username' and 'git_password'. This is also used by the Azure deployments to provide azure_tenant and oauth credentials", null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0110_project_uid.py b/src/core/api/app/projects/migrations/0110_project_uid.py new file mode 100644 index 00000000..6ba7d2a1 --- /dev/null +++ b/src/core/api/app/projects/migrations/0110_project_uid.py @@ -0,0 +1,19 @@ +# Generated by Django 5.0.7 on 2024-09-22 17:37 + +import uuid +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0109_project_azure_deploy_key_alter_project_ci_provider_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='project', + name='uid', + field=models.UUIDField(default=uuid.uuid4, editable=False, help_text="For dynamic authentication, we need to have a unique ID to reference this project that isn't sequential as a security token. 
Since this is internal only, we can restrict access to inside the Kubernetes cluster only, anad this is not accessible to end users so it should be sufficiently secure.", null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0111_auto_20240922_1739.py b/src/core/api/app/projects/migrations/0111_auto_20240922_1739.py new file mode 100644 index 00000000..c91c6340 --- /dev/null +++ b/src/core/api/app/projects/migrations/0111_auto_20240922_1739.py @@ -0,0 +1,21 @@ +# Generated by Django 5.0.7 on 2024-09-22 17:39 + +from django.db import migrations +import uuid + +def gen_uuid(apps, schema_editor): + MyModel = apps.get_model("projects", "Project") + for row in MyModel.objects.all(): + row.uid = uuid.uuid4() + row.save(update_fields=["uid"]) + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0110_project_uid'), + ] + + operations = [ + migrations.RunPython(gen_uuid, reverse_code=migrations.RunPython.noop), + ] diff --git a/src/core/api/app/projects/migrations/0112_auto_20240922_1739.py b/src/core/api/app/projects/migrations/0112_auto_20240922_1739.py new file mode 100644 index 00000000..89dab1dd --- /dev/null +++ b/src/core/api/app/projects/migrations/0112_auto_20240922_1739.py @@ -0,0 +1,18 @@ +# Generated by Django 5.0.7 on 2024-09-22 17:39 + +from django.db import migrations, models +import uuid + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0111_auto_20240922_1739'), + ] + + operations = [ + migrations.AlterField( + model_name='project', + name='uid', + field=models.UUIDField(default=uuid.uuid4, editable=False, help_text="For dynamic authentication, we need to have a unique ID to reference this project that isn't sequential as a security token. Since this is internal only, we can restrict access to inside the Kubernetes cluster only, anad this is not accessible to end users so it should be sufficiently secure.", unique=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0113_project_secrets_secondary_backend_and_more.py b/src/core/api/app/projects/migrations/0113_project_secrets_secondary_backend_and_more.py new file mode 100644 index 00000000..c51a95bf --- /dev/null +++ b/src/core/api/app/projects/migrations/0113_project_secrets_secondary_backend_and_more.py @@ -0,0 +1,24 @@ +# Generated by Django 5.0.7 on 2024-09-29 18:30 + +import core.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0112_auto_20240922_1739'), + ] + + operations = [ + migrations.AddField( + model_name='project', + name='secrets_secondary_backend', + field=models.CharField(blank=True, help_text="This is an Airflow class 'dot path' to enable the use of a secondary secret backend if the Datacoves Secret Backend is in use.", max_length=512, null=True), + ), + migrations.AddField( + model_name='project', + name='secrets_secondary_backend_config', + field=core.fields.EncryptedJSONField(blank=True, editable=True, help_text='When a secondary backend is chosen, this is the Airflow configuration block for the backend. 
It should be a bunch of key=value pairs.', null=True), + ), + ] diff --git a/src/core/api/app/projects/migrations/0114_alter_environmentintegration_service_and_more.py b/src/core/api/app/projects/migrations/0114_alter_environmentintegration_service_and_more.py new file mode 100644 index 00000000..336c935e --- /dev/null +++ b/src/core/api/app/projects/migrations/0114_alter_environmentintegration_service_and_more.py @@ -0,0 +1,47 @@ +# Generated by Django 5.0.7 on 2024-10-26 19:14 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("projects", "0113_project_secrets_secondary_backend_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="environmentintegration", + name="service", + field=models.CharField( + choices=[ + ("airbyte", "Airbyte"), + ("airflow", "Airflow"), + ("code-server", "Code-Server"), + ("datahub", "Datahub"), + ("dbt-docs", "Dbt-Docs"), + ("grafana", "Grafana"), + ("local-dbt-docs", "Local-Dbt-Docs"), + ("superset", "Superset"), + ], + max_length=50, + ), + ), + migrations.AlterField( + model_name="servicecredential", + name="service", + field=models.CharField( + choices=[ + ("airbyte", "Airbyte"), + ("airflow", "Airflow"), + ("code-server", "Code-Server"), + ("datahub", "Datahub"), + ("dbt-docs", "Dbt-Docs"), + ("grafana", "Grafana"), + ("local-dbt-docs", "Local-Dbt-Docs"), + ("superset", "Superset"), + ], + max_length=50, + ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0115_servicecredential_delivery_mode.py b/src/core/api/app/projects/migrations/0115_servicecredential_delivery_mode.py new file mode 100644 index 00000000..0c079806 --- /dev/null +++ b/src/core/api/app/projects/migrations/0115_servicecredential_delivery_mode.py @@ -0,0 +1,18 @@ +# Generated by Django 5.0.7 on 2024-12-18 22:28 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0114_alter_environmentintegration_service_and_more'), + ] + + operations = [ + migrations.AddField( + model_name='servicecredential', + name='delivery_mode', + field=models.CharField(choices=[('env', 'Environment Variable'), ('connection', 'Airflow Connection')], default='env', max_length=16), + ), + ] diff --git a/src/core/api/app/projects/migrations/0116_alter_environmentintegration_service_and_more.py b/src/core/api/app/projects/migrations/0116_alter_environmentintegration_service_and_more.py new file mode 100644 index 00000000..863da5aa --- /dev/null +++ b/src/core/api/app/projects/migrations/0116_alter_environmentintegration_service_and_more.py @@ -0,0 +1,44 @@ +# Generated by Django 5.0.7 on 2025-01-23 00:17 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("projects", "0115_servicecredential_delivery_mode"), + ] + + operations = [ + migrations.AlterField( + model_name="environmentintegration", + name="service", + field=models.CharField( + choices=[ + ("airbyte", "Airbyte"), + ("airflow", "Airflow"), + ("code-server", "Code-Server"), + ("datahub", "Datahub"), + ("dbt-docs", "Dbt-Docs"), + ("local-dbt-docs", "Local-Dbt-Docs"), + ("superset", "Superset"), + ], + max_length=50, + ), + ), + migrations.AlterField( + model_name="servicecredential", + name="service", + field=models.CharField( + choices=[ + ("airbyte", "Airbyte"), + ("airflow", "Airflow"), + ("code-server", "Code-Server"), + ("datahub", "Datahub"), + ("dbt-docs", "Dbt-Docs"), + ("local-dbt-docs", "Local-Dbt-Docs"), + ("superset", "Superset"), + ], 
+ max_length=50, + ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0117_alter_userenvironment_code_server_active.py b/src/core/api/app/projects/migrations/0117_alter_userenvironment_code_server_active.py new file mode 100644 index 00000000..7fd178ff --- /dev/null +++ b/src/core/api/app/projects/migrations/0117_alter_userenvironment_code_server_active.py @@ -0,0 +1,17 @@ +# Generated by Django 5.0.7 on 2025-04-08 22:43 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("projects", "0116_alter_environmentintegration_service_and_more"), + ] + + operations = [ + migrations.AlterField( + model_name="userenvironment", + name="code_server_active", + field=models.BooleanField(default=False), + ), + ] diff --git a/src/core/api/app/projects/migrations/0117_profilefile_permissions.py b/src/core/api/app/projects/migrations/0117_profilefile_permissions.py new file mode 100644 index 00000000..5b04290f --- /dev/null +++ b/src/core/api/app/projects/migrations/0117_profilefile_permissions.py @@ -0,0 +1,27 @@ +# Generated by Django 5.0.7 on 2025-03-04 13:50 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("projects", "0116_alter_environmentintegration_service_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="profilefile", + name="permissions", + field=models.CharField( + choices=[ + ("0o644", "644"), + ("0o755", "755"), + ("0o600", "600"), + ("0o700", "700"), + ], + default="0o644", + help_text="File permissions", + max_length=5, + ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0117_release_airflow_providers.py b/src/core/api/app/projects/migrations/0117_release_airflow_providers.py new file mode 100644 index 00000000..49bd43e3 --- /dev/null +++ b/src/core/api/app/projects/migrations/0117_release_airflow_providers.py @@ -0,0 +1,19 @@ +# Generated by Django 5.0.7 on 2025-02-11 22:55 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("projects", "0116_alter_environmentintegration_service_and_more"), + ] + + operations = [ + migrations.AddField( + model_name="release", + name="airflow_providers", + field=models.JSONField( + default=dict, help_text="Airflow library providers dictionary." 
+ ), + ), + ] diff --git a/src/core/api/app/projects/migrations/0118_merge_20250304_2206.py b/src/core/api/app/projects/migrations/0118_merge_20250304_2206.py new file mode 100644 index 00000000..8bc1c13f --- /dev/null +++ b/src/core/api/app/projects/migrations/0118_merge_20250304_2206.py @@ -0,0 +1,14 @@ +# Generated by Django 5.0.7 on 2025-03-04 22:06 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0117_profilefile_permissions'), + ('projects', '0117_release_airflow_providers'), + ] + + operations = [ + ] diff --git a/src/core/api/app/projects/migrations/0119_merge_20250410_1701.py b/src/core/api/app/projects/migrations/0119_merge_20250410_1701.py new file mode 100644 index 00000000..0de3eac7 --- /dev/null +++ b/src/core/api/app/projects/migrations/0119_merge_20250410_1701.py @@ -0,0 +1,14 @@ +# Generated by Django 5.0.7 on 2025-04-10 17:01 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0117_alter_userenvironment_code_server_active'), + ('projects', '0118_merge_20250304_2206'), + ] + + operations = [ + ] diff --git a/src/core/api/app/projects/migrations/0120_alter_profilefile_permissions.py b/src/core/api/app/projects/migrations/0120_alter_profilefile_permissions.py new file mode 100644 index 00000000..9d56c49f --- /dev/null +++ b/src/core/api/app/projects/migrations/0120_alter_profilefile_permissions.py @@ -0,0 +1,18 @@ +# Generated by Django 5.0.7 on 2025-04-10 17:27 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0119_merge_20250410_1701'), + ] + + operations = [ + migrations.AlterField( + model_name='profilefile', + name='permissions', + field=models.CharField(choices=[('0o644', '644'), ('0o744', '744'), ('0o600', '600'), ('0o700', '700')], default='0o644', help_text='File permissions', max_length=5), + ), + ] diff --git a/src/core/api/app/projects/migrations/__init__.py b/src/core/api/app/projects/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/projects/models/__init__.py b/src/core/api/app/projects/models/__init__.py new file mode 100644 index 00000000..0c7c3d1d --- /dev/null +++ b/src/core/api/app/projects/models/__init__.py @@ -0,0 +1,9 @@ +from .connection import * # noqa: F401,F403 +from .environment import * # noqa: F401,F403 +from .environment_integration import * # noqa: F401,F403 +from .profile import * # noqa: F401,F403 +from .project import * # noqa: F401,F403 +from .release import * # noqa: F401,F403 +from .repository import * # noqa: F401,F403 +from .user_environment import * # noqa: F401,F403 +from .webhook import * # noqa: F401,F403 diff --git a/src/core/api/app/projects/models/connection.py b/src/core/api/app/projects/models/connection.py new file mode 100644 index 00000000..96bf06a9 --- /dev/null +++ b/src/core/api/app/projects/models/connection.py @@ -0,0 +1,723 @@ +import json +import re + +from autoslug import AutoSlugField +from codegen.templating import build_user_context +from core.fields import EncryptedJSONField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.core.exceptions import ValidationError +from django.db import models +from django.utils.text import slugify + +from lib.dicts import deep_merge + + +class ConnectionTypeManager(models.Manager): + def create_defaults(self): + self.update_or_create( + 
slug=self.model.TYPE_SNOWFLAKE, + defaults={ + "account": None, + "name": "Snowflake", + "required_fieldsets": [ + [ + "account", + "warehouse", + "role", + "database", + "schema", + "user", + "password", + "mfa_protected", + ], + [ + "account", + "warehouse", + "role", + "database", + "schema", + "user", + "ssl_key_id", + "mfa_protected", + ], + ], + }, + ) + + self.update_or_create( + slug=self.model.TYPE_REDSHIFT, + defaults={ + "account": None, + "name": "Redshift", + "required_fieldsets": [ + ["host", "user", "password", "database", "schema"] + ], + }, + ) + + self.update_or_create( + slug=self.model.TYPE_BIGQUERY, + defaults={ + "account": None, + "name": "Bigquery", + "required_fieldsets": [["keyfile_json", "dataset"]], + }, + ) + + self.update_or_create( + slug=self.model.TYPE_DATABRICKS, + defaults={ + "account": None, + "name": "Databricks", + "required_fieldsets": [["schema", "host", "http_path", "token"]], + }, + ) + + +def connectiontype_slug(instance): + if instance.account: + return f"{instance.name}-{instance.account.slug}" + else: + return instance.name + + +class ConnectionType(AuditModelMixin, DatacovesModel): + """Connection types are used by Connection Templates to determine what + fields are needed for a given service connection + + These are the fields needed to make a connection. It uses a custom + manager called ConnectionTypeManager which provides an + ConnectionType.objects.create_defaults() call which creates the + default ConnectionTypes for each of our types defined below. + + create_defaults is idempotent (safe to re-run even if defaults are + already created). + + ========= + Constants + ========= + + - TYPE_SNOWFLAKE + - TYPE_REDSHIFT + - TYPE_BIGQUERY + - TYPE_DATABRICKS + """ + + TYPE_SNOWFLAKE = "snowflake" + TYPE_REDSHIFT = "redshift" + TYPE_BIGQUERY = "bigquery" + TYPE_DATABRICKS = "databricks" + + name = models.CharField(max_length=130) + slug = AutoSlugField( + populate_from=connectiontype_slug, + unique=True, + help_text="The slug is used as the type, unlike many models where " + "slug and type are different fields.", + ) + account = models.ForeignKey( + "users.Account", + on_delete=models.CASCADE, + related_name="connection_types", + null=True, + blank=True, + help_text="If null, this is a system level connection type.", + ) + + required_fieldsets = models.JSONField( + default=list, + null=True, + blank=True, + help_text='list of lists: [["user", "password", "account"], ' + '["user", "token", "account"]]', + ) + + objects = ConnectionTypeManager() + + def __str__(self): + return self.name + + @property + def is_snowflake(self) -> bool: + """True if this is a snowflake connection type""" + return self.slug == self.TYPE_SNOWFLAKE + + @property + def is_redshift(self) -> bool: + """True if this is a redshift connection type""" + return self.slug == self.TYPE_REDSHIFT + + @property + def is_bigquery(self) -> bool: + """True if this is a bigquery connection type""" + return self.slug == self.TYPE_BIGQUERY + + @property + def is_databricks(self) -> bool: + """True if this is a databricks connection type""" + return self.slug == self.TYPE_DATABRICKS + + +class ConnectionTemplate(AuditModelMixin, DatacovesModel): + """Connection templates are used to provide basic information about + service connections + + These provide default values for the different fields needed to + connect to a service. These field names are defined by + :model:`projects.ConnectionType`. 
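    As a rough, hypothetical illustration (all values invented here), a
    Snowflake template's ``connection_details`` could hold shared defaults for
    the fieldset declared on the connection type::

        {
            "account": "acme-xy12345",
            "warehouse": "transforming",
            "role": "analyst",
            "database": "analytics",
            "schema": "dev",
        }

    leaving per-credential keys such as ``user`` and ``password`` (or an SSL
    key) to be filled in by the ServiceCredential / UserCredential overrides.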
+ + :model:`projects.ServiceCredential` and :model:`projects.UserCredential` + both use this model as a foundation, providing overrides for the default + values as needed and linking in the secrets needed to actually connect + to the service. + + ========= + Constants + ========= + + - CONNECTION_USER_PROVIDED + - CONNECTION_USER_FROM_EMAIL_USERNAME + - CONNECTION_USER_FROM_TEMPLATE + - CONNECTION_USERS - a tuple of tuple pairs for populating a select box + + These constants are used by the 'connection_user' field; these do not + apply to ServiceCredential, but they do apply to UserCredential. If + CONNECTION_USER_PROVIDED is used, the user may set up their own + credentials however they wish. + + If CONNECTION_USER_FROM_EMAIL_USERNAME is used, they are forced to use + their email as a username and default credentials are used. + + If CONNECTION_USER_FROM_TEMPLATE is used, we will use a + :model:`codegen.Template` of type CONTEXT_TYPE_USER or CONTEXT_TYPE_NONE + and again not give the user an option. + + This only applies if for_users is True. If for_users is True, and + user credentials are created, and then later for_users is turned to + False, 'save' will delete the UserCredential records. + + ======= + Methods + ======= + + - **clean()** - Private method to do validation + - **save(...)** - Overriden to run clean() validation and to delete + UserCredential records associated with this template if for_users + is set to False. + """ + + CONNECTION_USER_PROVIDED = "provided" + CONNECTION_USER_FROM_EMAIL_USERNAME = "email_username" + CONNECTION_USER_FROM_TEMPLATE = "template" + CONNECTION_USER_FROM_EMAIL = "email" + CONNECTION_USER_FROM_EMAIL_UPPERCASE = "email_uppercase" + CONNECTION_USERS = ( + ( + CONNECTION_USER_PROVIDED, + "User provided", + ), + ( + CONNECTION_USER_FROM_EMAIL_USERNAME, + "Inferred from email's username", + ), + ( + CONNECTION_USER_FROM_TEMPLATE, + "Inferred from user info using a custom template", + ), + ( + CONNECTION_USER_FROM_EMAIL, + "Inferred from email address", + ), + ( + CONNECTION_USER_FROM_EMAIL_UPPERCASE, + "Inferred from uppercase email address", + ), + ) + project = models.ForeignKey( + "Project", on_delete=models.CASCADE, related_name="connection_templates" + ) + type = models.ForeignKey( + ConnectionType, + on_delete=models.CASCADE, + related_name="connection_templates", + ) + name = models.CharField(max_length=130) + connection_details = EncryptedJSONField( + default=dict, + help_text="This maps the keys needed (which are defined in " + "ConnectionType.required_fieldsets) to default values which may " + "be overriden by ServiceCredential or UserCredential.", + ) + for_users = models.BooleanField( + default=True, help_text="Can users set this credential up for themselves?" + ) + connection_user = models.CharField( + max_length=20, + default=CONNECTION_USER_PROVIDED, + choices=CONNECTION_USERS, + help_text="Can users configure their own credentials or are they " + "restricted? 
See the Model class documentation for full details.", + ) + connection_user_template = models.ForeignKey( + "codegen.Template", + on_delete=models.SET_NULL, + null=True, + blank=True, + help_text="Only used for custom templates connection user.", + ) + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["project", "name"], + name="Project and name uniqueness", + ) + ] + + @property + def user_credentials_count(self): + """Number of associated user credentials""" + return self.user_credentials.count() + + @property + def service_credentials_count(self): + """Number of associated service credentials""" + return self.service_credentials.count() + + @property + def type_slug(self): + """A short code type for this connection template""" + return self.type.slug + + def __str__(self): + return self.name + + def clean(self): + """Ensures that the connection template belongs to the same account + as the type, and makes sure that if we have a connection_user_template, + that the template is the correct type. + + May through ValidationError if there is a problem. + """ + + if self.type.account and self.type.account != self.project.account: + raise ValidationError( + "Connection type's account and project's account can't be different" + ) + if self.connection_user_template: + self.connection_user_template.is_enabled_for(__class__.__name__) + if self.connection_user_template.context_type not in ( + self.connection_user_template.CONTEXT_TYPE_USER, + self.connection_user_template.CONTEXT_TYPE_NONE, + ): + raise ValidationError( + "Template for user field must be of context type 'User' or 'None'." + ) + + def save(self, *args, **kwargs): + """Wrapper for save to run our validation, and delete user credentials + if for_users is turned off.""" + + self.clean() + if self.pk: + if not self.for_users and self.user_credentials_count > 0: + self.user_credentials.all().delete() + return super().save(*args, **kwargs) + + +class ServiceCredential(AuditModelMixin, DatacovesModel): + """ServiceCredentials are used for shared services provided by the system + + These are, specifically, the services in settings.SERVICES + + These link specific credentials to environments in the project. + + ========= + Constants + ========= + + - SERVICES - a list of tuple pairs for populating a select box + + Delivery modes are for the delivery_mode field, which is how we are + 'delivering' the variable for usage; via environment variables or + are we injecting an airflow connection? + + - DELIVERY_MODE_ENV - We will set secrets via environment variables. + - DELIVERY_MODE_CONNECTION - We will push this into an Airflow Connection. + - DELIVERY_MODES - a list of tuple pairs for populating a select box + + ======= + Methods + ======= + + - **clean()** - Private method to implemeent some pre-save validation + - **combined_connection()** - Merges defaults with overrides and returns + the result -- if you are consuming this ServiceCredential, you will + want to use this method to get the connection settings. + - **save(...)** - Overridden to use clean() validation. + - **get_airflow_connection()** - Returns a dictionary representing this + connection in a format that can be pushed to Airflow's API. 
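    As a sketch (hypothetical values): for a password-based Snowflake
    credential named "main", get_airflow_connection() would return roughly::

        {
            "connection_id": "main",
            "conn_type": "snowflake",
            "description": "Managed by the 'Service Credentials' page in Launchpad",
            "login": "svc_transform",
            "schema": "analytics",
            "password": "********",
            "extra": '{"account": "acme-xy12345", "database": "analytics", '
                     '"warehouse": "transforming", "role": "loader", '
                     '"mfa_protected": false}',
        }

    Key-pair credentials instead put ``private_key_content`` into ``extra``
    and send an empty password.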
+ """ + + SERVICES = [(service, service.title()) for service in sorted(settings.SERVICES)] + + DELIVERY_MODE_ENV = "env" + DELIVERY_MODE_CONNECTION = "connection" + DELIVERY_MODES = ( + ( + DELIVERY_MODE_ENV, + "Environment Variable", + ), + ( + DELIVERY_MODE_CONNECTION, + "Airflow Connection", + ), + ) + + name = models.CharField(max_length=50, default="main") + environment = models.ForeignKey( + "Environment", on_delete=models.CASCADE, related_name="service_credentials" + ) + connection_template = models.ForeignKey( + ConnectionTemplate, on_delete=models.CASCADE, related_name="service_credentials" + ) + connection_overrides = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="These override settings in the ConnectionTemplate; the " + "keys that should be set between a ServiceCredential and a " + "ConnectionTemplate are defined in ConnectionType. This relationship " + "is fully described in the ConnectionTemplate documentation.", + ) + ssl_key = models.ForeignKey( + "SSLKey", + on_delete=models.SET_NULL, + related_name="service_credentials", + null=True, + blank=True, + help_text="SSL key to use, if necessary for the connection type.", + ) + service = models.CharField(max_length=50, choices=SERVICES) + validated_at = models.DateTimeField( + blank=True, + null=True, + help_text="Credentials must be validated before we use them. This " + "should normally be set by the system.", + ) + delivery_mode = models.CharField( + max_length=16, + choices=DELIVERY_MODES, + null=False, + blank=False, + default=DELIVERY_MODE_ENV, + ) + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["environment", "service", "name"], + name="Environment service credential uniqueness", + ) + ] + + def __str__(self): + return f"{self.environment}:{self.service}:{self.name}" + + def clean(self): + """Ensure the environment and connection template are in the same + project. Throws ValidationError if they are not. + """ + + if self.environment.project != self.connection_template.project: + raise ValidationError( + "Environment and Connection must belong to the same Project" + ) + + validate_connection_name(self.name) + + def combined_connection(self): + """Combine the overrides with the connection template defaults and + return the combined connection parameters.""" + + return deep_merge( + self.connection_overrides, self.connection_template.connection_details + ) + + def save(self, *args, **kwargs): + """Override save to do validation via clean()""" + + self.clean() + return super().save(*args, **kwargs) + + @property + def public_ssl_key(self): + """The public SSL key if set, None if not set""" + + if self.ssl_key: + return self.ssl_key.public + return None + + def get_airflow_connection(self) -> dict: + """This returns the connection as an airflow connection dictionary + that is valid for whatever connection type this is.""" + + # Basic sanity check. 
If you want to check if the credential is + # validated or has the correct delivery mode, you should check + # that before this call + if self.service != settings.SERVICE_AIRFLOW: + raise RuntimeError("Only works for airflow connections") + + # Make sure we support this connection type + conn_type = self.connection_template.type.slug + + if not hasattr(self, f"_get_airflow_connection_for_{conn_type}"): + raise RuntimeError(f"Type {conn_type} not yet supported") + + conn = self.combined_connection() + + # Common fields for all connection types + ret = { + "connection_id": self.name, + "conn_type": ( + conn_type + if conn_type != ConnectionType.TYPE_BIGQUERY + else "gcpbigquery" + ), # It's fun to be a little different, right? + "description": "Managed by the 'Service Credentials' page in Launchpad", + } + + ret.update(getattr(self, f"_get_airflow_connection_for_{conn_type}")(conn)) + + return ret + + def _get_airflow_connection_for_snowflake(self, conn: dict) -> dict: + """Do not call this method directly; it operates in support of + get_airflow_connection and will only return a dictionary of fields + specific to this connection type and not a fully fleshed out + connection dictionary. + """ + + extra = { + "account": conn.get("account", ""), + "database": conn.get("database", ""), + "warehouse": conn.get("warehouse", ""), + "role": conn.get("role", ""), + "mfa_protected": conn.get("mfa_protected", False), + } + + ret = { + "login": conn.get("user", ""), + "schema": conn.get("schema", ""), + } + + if self.ssl_key: + extra["private_key_content"] = self.ssl_key.private + ret["password"] = "" + elif "password" in conn: + ret["password"] = conn["password"] + else: + raise RuntimeError( + "Only password or key based connections work with this feature." + ) + + ret["extra"] = json.dumps(extra) + + return ret + + def _get_airflow_connection_for_redshift(self, conn: dict) -> dict: + """Do not call this method directly; it operates in support of + get_airflow_connection and will only return a dictionary of fields + specific to this connection type and not a fully fleshed out + connection dictionary. + """ + + return { + "host": conn.get("host"), + "schema": conn.get("schema"), + "password": conn.get("password"), + "login": conn.get("user"), + "extra": json.dumps( + { + "database": conn.get("database"), + } + ), + } + + def _get_airflow_connection_for_databricks(self, conn: dict) -> dict: + """Do not call this method directly; it operates in support of + get_airflow_connection and will only return a dictionary of fields + specific to this connection type and not a fully fleshed out + connection dictionary. + """ + + return { + "host": conn.get("host"), + "schema": conn.get("schema"), + "extra": json.dumps( + { + "token": conn.get("token"), + "http_path": conn.get("http_path"), + } + ), + } + + def _get_airflow_connection_for_bigquery(self, conn: dict) -> dict: + """Do not call this method directly; it operates in support of + get_airflow_connection and will only return a dictionary of fields + specific to this connection type and not a fully fleshed out + connection dictionary. 
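+
+        For illustration only (the dataset name is hypothetical), the
+        returned fragment is a single ``extra`` key holding JSON, roughly::
+
+            {"extra": '{"keyfile_json": {...}, "dataset": "analytics"}'}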
+ """ + + return { + "extra": json.dumps( + { + "keyfile_json": conn.get("keyfile_json"), + "dataset": conn.get("dataset"), + } + ), + } + + +def default_user_credential_usages(): + return ["code-server.dbt-profile"] + + +def validate_connection_name(name): + """Ensure names contain only alphanumeric, underscores and whitespaces""" + valid_naming_pattern = r"^[a-zA-Z0-9_\s]+$" + if not re.match(valid_naming_pattern, name): + raise ValidationError( + f"Name ({name}) must consist of alphanumeric characters, underscores and spaces" + ) + + +class UserCredential(AuditModelMixin, DatacovesModel): + """UserCredential is used for services which are in user environments + + For example, Code Server could use UserCredential. + + ======= + Methods + ======= + + - **clean()** - Private method to implemeent some pre-save validation + - **combined_connection()** - Merges defaults with overrides and returns + the result -- if you are consuming this UserCredential, you will + want to use this method to get the connection settings. + - **save(...)** - Overridden to use clean() validation. + """ + + name = models.CharField(max_length=130, default="dev") + user = models.ForeignKey( + "users.User", on_delete=models.CASCADE, related_name="credentials" + ) + environment = models.ForeignKey( + "Environment", on_delete=models.CASCADE, related_name="user_credentials" + ) + connection_template = models.ForeignKey( + ConnectionTemplate, on_delete=models.CASCADE, related_name="user_credentials" + ) + connection_overrides = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="These override settings in the ConnectionTemplate; the " + "keys that should be set between a UserCredential and a " + "ConnectionTemplate are defined in ConnectionType. This relationship " + "is fully described in the ConnectionTemplate documentation.", + ) + ssl_key = models.ForeignKey( + "SSLKey", + on_delete=models.SET_NULL, + related_name="user_credentials", + null=True, + blank=True, + help_text="The SSL key to use, if needed for this credential.", + ) + validated_at = models.DateTimeField( + blank=True, + null=True, + help_text="Only validated credentials will be used. This is usually " + "set by the system once we have verified the credential works.", + ) + used_on = models.JSONField( + default=default_user_credential_usages, + help_text="JSON list of strings, which are the services that use " + "this credential.", + ) + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["user", "environment", "name"], + name="User credential uniqueness", + ) + ] + + def __str__(self): + return f"{self.user}:{self.environment}:{(',').join(self.used_on)}" + + @property + def slug(self): + """Produce a slug based on the name using slugify""" + return slugify(self.name) + + def combined_connection(self): + """Combine the overrides with the connection template defaults and + return the combined connection parameters. 
This also processes + the 'user' key in the dictionary for + CONNECTION_USER_FROM_EMAIL_USERNAME and CONNECTION_USER_FROM_TEMPLATE + """ + + details = deep_merge( + self.connection_overrides, self.connection_template.connection_details + ) + if ( + self.connection_template.connection_user + == ConnectionTemplate.CONNECTION_USER_FROM_EMAIL_USERNAME + ): + details["user"] = self.user.email_username + elif ( + self.connection_template.connection_user + == ConnectionTemplate.CONNECTION_USER_FROM_TEMPLATE + ): + context = build_user_context(self.user) + details["user"] = self.connection_template.connection_user_template.render( + context + ) + return details + + def clean(self): + """Do validation; this includes making sure that the project of + the connection template and environment are the same. Also + throws an error if the connection template is for_users = False, + and finally verifies all items in 'used on' exists in + settings.USER_SERVICES + + Throws ValidationError if there is a problem + """ + + if self.environment.project != self.connection_template.project: + raise ValidationError( + "Environment and Connection must belong to the same Project" + ) + if not self.connection_template.for_users: + raise ValidationError("Connection must be enabled for users") + + for usage in self.used_on: + service, _ = usage.split(".") + if service not in settings.USER_SERVICES: + raise ValidationError( + f"Service '{service}' not recognized as a user service." + ) + + validate_connection_name(self.name) + + def save(self, *args, **kwargs): + """Override save to do validation via clean()""" + + self.clean() + return super().save(*args, **kwargs) diff --git a/src/core/api/app/projects/models/environment.py b/src/core/api/app/projects/models/environment.py new file mode 100644 index 00000000..145baba7 --- /dev/null +++ b/src/core/api/app/projects/models/environment.py @@ -0,0 +1,962 @@ +import random +import string + +from core.fields import EncryptedJSONField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.contrib.auth.models import ContentType, Group, Permission +from django.core.exceptions import ValidationError +from django.db import IntegrityError, models, transaction +from django.db.models import Q +from integrations.models import Integration +from users.models import ( + ExtendedGroup, + User, + make_permission_name, + parse_permission_name, +) + +from .environment_integration import EnvironmentIntegration +from .profile import Profile +from .user_environment import UserEnvironment + + +def generate_env_slug(): + return "".join( + [random.choice(string.ascii_lowercase) for _ in range(3)] + + [random.choice(string.digits) for _ in range(3)] + ) + + +def default_docker_config(): + return settings.DEFAULT_DOCKER_CONFIG + + +DOCKER_CONFIG_SECRET_NAME_DEFAULT = "docker-config-datacovesprivate" +NAMESPACE_PREFIX = "dcw-" + + +def default_services(): + return { + settings.SERVICE_AIRBYTE: {"enabled": False, "valid": True}, + settings.SERVICE_AIRFLOW: {"enabled": False, "valid": True}, + settings.SERVICE_CODE_SERVER: { + "enabled": False, + "valid": True, + }, + settings.SERVICE_DBT_DOCS: {"enabled": False, "valid": True}, + settings.SERVICE_SUPERSET: {"enabled": False, "valid": True}, + settings.SERVICE_DATAHUB: {"enabled": False, "valid": True}, + } + + +def default_internal_services(): + return { + settings.INTERNAL_SERVICE_MINIO: {"enabled": False}, + settings.INTERNAL_SERVICE_ELASTIC: {"enabled": False}, + 
settings.INTERNAL_SERVICE_NEO4J: {"enabled": False}, + settings.INTERNAL_SERVICE_POSTGRESQL: {"enabled": False}, + settings.INTERNAL_SERVICE_KAFKA: {"enabled": False}, + settings.INTERNAL_SERVICE_GRAFANA: {"enabled": False}, + } + + +def default_cluster(): + from clusters.models import Cluster + + return Cluster.objects.first() + + +def default_release(): + from projects.models import Release + + return Release.objects.get_latest() + + +def default_profile(): + return Profile.objects.get(slug="default") + + +class Environment(AuditModelMixin, DatacovesModel): + """Environments are the binding element for a group of services + + They contain the service configurations and also are the parent model + for :model:`projects.UserEnvironment` objects. + + ========= + Constants + ========= + + ----------------- + Environment Types + ----------------- + + - TYPE_DEV + - TYPE_TEST + - TYPE_PROD + - TYPES - tuple of tuple pairs for populating select boxes + + ----------------- + Update Strategies + ----------------- + + - UPDATE_LATEST - Update the environment updated to the latest version + - UPDATE_FREEZED - Do not update environment + - UPDATE_MINOR_LATEST - Update the environment to the latest minor + version for the current major version + - UPDATE_MAJOR_LATEST - Update to the latest major version + - UPDATE_STRATEGIES - Tuple of tuple pairs for populating a select box + + -------------------- + Environment Profiles + -------------------- + + - RELEASE_PROFILE_DBT_SNOWFLAKE + - RELEASE_PROFILE_DBT_REDSHIFT + - RELEASE_PROFILE_DBT_BIGQUERY + - RELEASE_PROFILE_DBT_DATABRICKS + - RELEASE_PROFILES - tuple of tuple pairs for populating a select box + + Environment profiles are used to determine what basic integration an + environment will use -- snowflake, redshift, etc. This controls which + docker images will be used for the environment's services. + + ======= + Methods + ======= + + - **clean()** - Private method for validation + - **save(...)** - Overrides save to run validationm, set up some defaults, + and retry creating the environment with different slugs if the one + desired is already in use (or unset). + - **create_permissions()** - Create permissions for this environment. + This is run by the post-save hook on environment create to make sure + there is a set of permissions for user/group assignments for a new + environment. + - **bump_release()** - Handles release updates for this environment based + on the update_strategy. Returns True if the environment was updated, + or False if not. + - **create_environment_groups()** - The same as create_permissions, + except it creates the groups. Also called by a post-save hook. + - **from_permission_names(permission_names)** - Static method. + Returns a queryset for environments by permission name. + - **is_service_enabled(service_name)** - Returns boolean True if + service is enabled (including internal services) + - **is_service_valid(service_name)** - Returns boolean True if + service is valid + - **is_service_enabled_and_valid(service_name)** - Returns the + 'and' of the above two "is service" calls. + - **is_internal_service_enabled(service_name)** - Returns boolean True + if internal service is enabled + - **enabled_and_valid_services()** - Returns a set of services that + are enabled and valid. 
+ - **get_service_image(service, repo, tag_prefix=None, + include_registry=True)** - Gets the service docker image for the + given service and repository + - **get_image(repo, use_release_profile=False)** - Gets an image for + the given repository, optionally using the profile image set. + - **get_plan()** - Fetches the plan associated with this environment + by way of the project and account. + - **get_quota()** - Get the combined quota, using the plan as the basis + but applying overrides from the environment level. + - **get_user_services(user)** - Returns a list of user-level services + available to a given user + """ + + TYPE_DEV = "dev" + TYPE_TEST = "test" + TYPE_PROD = "prod" + TYPES = ( + ( + TYPE_DEV, + "dev", + ), + ( + TYPE_TEST, + "test", + ), + ( + TYPE_PROD, + "prod", + ), + ) + + UPDATE_LATEST = "latest" + UPDATE_FREEZED = "freezed" + UPDATE_MINOR_LATEST = "minor" + UPDATE_MAJOR_LATEST = "major" + UPDATE_STRATEGIES = ( + ( + UPDATE_LATEST, + "Update to latest", + ), + ( + UPDATE_FREEZED, + "Freeze release", + ), + ( + UPDATE_MINOR_LATEST, + "Update to latest minor patch", + ), + ( + UPDATE_MAJOR_LATEST, + "Update to latest major patch", + ), + ) + RELEASE_PROFILE_DBT_SNOWFLAKE = "dbt-snowflake" + RELEASE_PROFILE_DBT_REDSHIFT = "dbt-redshift" + RELEASE_PROFILE_DBT_BIGQUERY = "dbt-bigquery" + RELEASE_PROFILE_DBT_DATABRICKS = "dbt-databricks" + RELEASE_PROFILES = ( + ( + RELEASE_PROFILE_DBT_SNOWFLAKE, + "dbt-snowflake", + ), + ( + RELEASE_PROFILE_DBT_REDSHIFT, + "dbt-redshift", + ), + ( + RELEASE_PROFILE_DBT_BIGQUERY, + "dbt-bigquery", + ), + ( + RELEASE_PROFILE_DBT_DATABRICKS, + "dbt-databricks", + ), + ) + + slug = models.CharField(max_length=6, unique=True, default=generate_env_slug) + name = models.CharField(max_length=50) + type = models.CharField(max_length=60, choices=TYPES, default=TYPE_DEV) + sync = models.BooleanField( + default=False, + help_text="Does the environment need to be sync'd? This will " + "set up the environment and start pods up as needed.", + ) + + project = models.ForeignKey( + "Project", on_delete=models.CASCADE, related_name="environments" + ) + + cluster = models.ForeignKey( + "clusters.Cluster", + on_delete=models.PROTECT, + related_name="environments", + default=default_cluster, + ) + + # The current environment's release + release = models.ForeignKey( + "Release", + on_delete=models.PROTECT, + related_name="environments", + default=default_release, + ) + release_profile = models.CharField( + max_length=50, + choices=RELEASE_PROFILES, + default="dbt-snowflake", + help_text="We have different docker images for different backends; " + "the release profile selects which set of docker images are used.", + ) + + update_strategy = models.CharField( + max_length=10, + choices=UPDATE_STRATEGIES, + default=UPDATE_FREEZED, + help_text="How will system updates be applied to this environment.", + ) + + # Environment profile + profile = models.ForeignKey( + "Profile", + on_delete=models.CASCADE, + related_name="environments", + default=default_profile, + help_text="Profiles control files that are automatically generated " + "for the environment and some credential items. 
They are also " + "the linkage to Profile Image Sets which can control what images and " + "python libraries are available to an environment.", + ) + + # FIXME: Move dbt_home_path to settings + dbt_home_path = models.CharField(max_length=4096, default="", blank=True) + # FIXME: Move dbt_profiles_dir to airflow_config + dbt_profiles_dir = models.CharField( + max_length=4096, default="automate/dbt", blank=True + ) + + # Docker + docker_registry = models.CharField( + max_length=253, + blank=True, + help_text="If not provided, this defaults to dockerhub.", + ) + docker_config_secret_name = models.CharField( + max_length=253, default=DOCKER_CONFIG_SECRET_NAME_DEFAULT, null=True, blank=True + ) + + services = models.JSONField( + default=default_services, + help_text="A map of services. The keys are the names of enabled " + "services. Values are dictionaries, currently empty. May be used in " + "the future to specify that a service is paused due to an expired " + "trial, etc. For most configuration, though, think first of adding " + "fields to Environment and Workspace spec.", + ) + + internal_services = models.JSONField( + default=default_internal_services, + help_text="Enable or disable certain internal services. This is " + "a dictionary that maps service names to dictionaries that have " + "configuration for each service; each has a configuration key " + "'enabled' which may be true or false.", + ) + + airbyte_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of Airbyte-specfic configuration items.", + ) + + airflow_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of Airflow-specific configuration items.", + ) + + superset_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of Superset-specific configuration items.", + ) + + dbt_docs_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of DBT Doc-specific configuration items.", + ) + + code_server_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of Code Server-specific configuration items.", + ) + + grafana_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of Grafana-specific configuration items.", + ) + + minio_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of Minio-specific configuration items.", + ) + + elastic_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of Elastic-specific configuration items.", + ) + + neo4j_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of Neo4J-specific configuration items.", + ) + + postgresql_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of PostgreSQL-specific configuration items.", + ) + + kafka_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of Kafka-specific configuration items.", + ) + + datahub_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="A dictionary of DataHub-specific configuration items.", + ) + + docker_config = EncryptedJSONField( + default=default_docker_config, + blank=True, + null=True, + help_text="An empty docker_config means core-api is not responsible " + "for creating the secret, another system creates the secret named " + 
"docker_config_secret_name.", + ) + + # Pomerium + pomerium_config = EncryptedJSONField( + default=dict, + null=True, + blank=True, + help_text="A dictionary of Pomerium-specific configuration items.", + ) + + # Environment settings + settings = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="A dictionary of general Environment settings.", + ) + + # + workspace_generation = models.IntegerField( + null=True, + help_text="The last workspace's (kubernetes resource) generation " "we wrote.", + ) + quotas = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Quota configuration dictionary. This overrides whatever " + "is set on the plan level. See the Plan model documentation for more " + "details about how quotas work.", + ) + variables = EncryptedJSONField( + default=dict, + null=True, + blank=True, + help_text="Dictionary of environment variables to provide to the " + "pods; these are key-value pairs.", + ) + + def __str__(self): + return self.slug + + def clean(self): + """Handle validation for the object. Prevents test/prod environments + from running code server, checks release compatability, checks + profile image set compatibility, and lastly makes sure that the + profile and project are in the same account as the environment. + """ + + if self.type != self.TYPE_DEV and self.services["code-server"]["enabled"]: + raise ValidationError("Test and prod environments cannot run code server.") + + if self.pk: + old_version = Environment.objects.get(id=self.pk) + if old_version.release != self.release and not self.release.is_supported( + self + ): + raise ValidationError( + f"Environment release {self.release} is not compatible with " + f"cluster release {self.cluster.release}." + ) + + if self.profile.image_set and not self.profile.image_set.is_compatible( + self.release + ): + raise ValidationError( + f"Profile image set {self.profile}'s release is not compatible " + f"with environment's release {self.release}." + ) + if self.profile.account and self.project.account != self.profile.account: + raise ValidationError( + "Environment and Profile must belong to the same Account" + ) + + def save(self, *args, **kwargs): + """Retries with new slugs if duplicated. Does validation via + clean. Sets up some defaults if needed. + """ + + # update default services + services = default_services() + services.update(self.services) + self.services = services + if not self.pk: + # use cluster defaults when missing docker registry settings + if not self.docker_registry and self.cluster.docker_registry: + self.docker_registry = self.cluster.docker_registry + if ( + not self.docker_config_secret_name + or self.docker_config_secret_name == DOCKER_CONFIG_SECRET_NAME_DEFAULT + ) and self.cluster.docker_config_secret_name: + self.docker_config_secret_name = self.cluster.docker_config_secret_name + self.clean() + # retry slug generation + retries = 5 + exception = None + while retries > 0: + retries -= 1 + try: + with transaction.atomic(): + return super().save(*args, **kwargs) + except IntegrityError as e: + if "projects_environment_slug_key" in str(e): + exception = e + self.slug = generate_env_slug() + else: + raise e + if exception: + raise exception + + @property + def has_code_server(self): + return self.type == Environment.TYPE_DEV + + def create_permissions(self): + """Create permissions for this environment. This is run by the + post-save hook on environment create to make sure there is a + set of permissions for user/group assignments for a new environment. 
+ """ + + content_type = ContentType.objects.get(app_label="users", model="account") + for resource in settings.WORKBENCH_RESOURCES: + for action in (settings.ACTION_READ, settings.ACTION_WRITE): + name = make_permission_name( + resource, + action, + account_slug=self.project.account.slug, + project_slug=self.project.slug, + environment_slug=self.slug, + ) + + Permission.objects.get_or_create( + name=name, + content_type=content_type, + defaults={"codename": name[:100]}, + ) + for resource in settings.DBT_API_RESOURCES: + name = resource.format( + cluster_domain=self.cluster.domain, env_slug=self.slug + ) + Permission.objects.get_or_create( + name=name, + content_type=content_type, + defaults={"codename": name[:100]}, + ) + + # Create the service user if necessary and make a system token for + # it. + if "system_api_key" not in self.settings: + from clusters.adapters.airflow import AirflowAdapter + from iam.models import DatacovesToken + + sa_user = AirflowAdapter.setup_service_account(self) + instance, token = DatacovesToken.objects.create( + user=sa_user, + expiry=None, + prefix="", + type=DatacovesToken.TYPE_ENVIRONMENT, + environment=self, + is_system=True, + ) + + self.settings["system_api_key"] = token + + # Avoid save signal loops, because this is called from a signal. + Environment.objects.filter(id=self.id).update(settings=self.settings) + + def bump_release(self) -> bool: + """Handles release updates for this environment based on the + update_strategy. Returns True if the environment was updated, or + False if not. + """ + + from projects.models import Release + + if self.release.is_pre: + return False + major, minor, _ = self.release.version_components + + release = None + if self.update_strategy == self.UPDATE_LATEST: + release = Release.objects.get_latest() + elif self.update_strategy == self.UPDATE_MINOR_LATEST: + release = Release.objects.get_latest(prefix=f"{major}.{minor}.") + elif self.update_strategy == self.UPDATE_MAJOR_LATEST: + release = Release.objects.get_latest(prefix=f"{major}.") + + if release and release != self.release: + self.release = release + self.save() + return True + return False + + @property + def environment_level_permissions(self): + """Returns all environment level permissions for this environment + as a queryset + """ + + return Permission.objects.filter( + Q( + name__startswith=f"{self.project.account.slug}:{self.project.slug}:{self.slug}|" + ) + | Q(name__startswith=f"{self.project.account.slug}|services:") + ) + + @property + def groups(self): + """Returns all groups that have environment level permissions for this + environment as a queryset""" + + return Group.objects.filter( + permissions__in=self.environment_level_permissions + ).distinct() + + @property + def users(self): + """Returns all active users that have access to this environment + as a queryset + """ + + return ( + User.objects.exclude(deactivated_at__isnull=False) + .filter(Q(groups__in=self.groups) | Q(groups__in=self.project.groups)) + .distinct() + .order_by("created_at") + ) + + @property + def roles_and_permissions(self) -> list: + permissions_for_viewers = [ + f"workbench:{settings.SERVICE_DBT_DOCS}|{settings.ACTION_READ}", + f"workbench:{settings.SERVICE_SUPERSET}|{settings.ACTION_READ}", + f"workbench:{settings.SERVICE_AIRFLOW}|{settings.ACTION_READ}", + f"{settings.SERVICE_DATAHUB_DATA}|{settings.ACTION_READ}", + f"services:{settings.INTERNAL_SERVICE_GRAFANA}:dashboards|{settings.ACTION_READ}", + ] + + permissions_for_developers = [ + 
f"workbench:{settings.SERVICE_DBT_DOCS}|{settings.ACTION_READ}", + f"workbench:{settings.SERVICE_SUPERSET}|{settings.ACTION_READ}", + f"{settings.SERVICE_SUPERSET_DATA_SOURCES}|{settings.ACTION_WRITE}", + f"workbench:{settings.SERVICE_AIRFLOW}|{settings.ACTION_READ}", + f"workbench:{settings.SERVICE_AIRBYTE}|{settings.ACTION_READ}", + f"workbench:{settings.SERVICE_CODE_SERVER}|{settings.ACTION_WRITE}", + f"workbench:{settings.SERVICE_LOCAL_DBT_DOCS}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_DATAHUB_DATA}|{settings.ACTION_WRITE}", + f"services:{settings.INTERNAL_SERVICE_GRAFANA}:dashboards|{settings.ACTION_READ}", + ] + + permissions_for_sysadmins = [ + f"{settings.SERVICE_SUPERSET_DATA_SOURCES}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_AIRFLOW_SYS_ADMIN}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_AIRFLOW_DAGS}|{settings.ACTION_WRITE}", + f"workbench:{settings.SERVICE_AIRBYTE}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_DATAHUB_DATA}|{settings.ACTION_WRITE}", + f"services:{settings.INTERNAL_SERVICE_GRAFANA}:dashboards|{settings.ACTION_READ}", + ] + + permissions_for_admins = [ + f"{settings.SERVICE_SUPERSET_SECURITY}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_AIRFLOW_ADMIN}|{settings.ACTION_WRITE}", + f"workbench:{settings.SERVICE_AIRBYTE}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_DATAHUB_ADMIN}|{settings.ACTION_WRITE}", + f"services:{settings.INTERNAL_SERVICE_GRAFANA}:dashboards|{settings.ACTION_WRITE}", + ] + + roles_and_permissions = [ + ( + ExtendedGroup.Role.ROLE_ENVIRONMENT_DEVELOPER, + permissions_for_developers, + "environment developers", + ), + ( + ExtendedGroup.Role.ROLE_ENVIRONMENT_VIEWER, + permissions_for_viewers, + "environment viewers", + ), + ( + ExtendedGroup.Role.ROLE_ENVIRONMENT_SYSADMIN, + permissions_for_sysadmins, + "environment sys admins", + ), + ( + ExtendedGroup.Role.ROLE_ENVIRONMENT_ADMIN, + permissions_for_admins, + "environment admins", + ), + ] + + return roles_and_permissions + + def create_environment_groups(self, force_update=False): + """Create groups for this environment. This is run by the + post-save hook on environment create to make sure there is a + set of typical groups for the new environment. + """ + for role, permissions, group_name_suffix in self.roles_and_permissions: + existing_group = ExtendedGroup.objects.filter( + role=role, environment=self + ).first() + + if not existing_group or force_update: + # Create the group if does not exist + group, _ = Group.objects.get_or_create( + name=f"'{self.slug}' {group_name_suffix}" + ) + ExtendedGroup.objects.get_or_create( + group=group, + role=role, + account=self.account, + project=self.project, + environment=self, + ) + + if force_update: + group.permissions.clear() + + # Building the filter to the permissions dinamically + permission_filter = Q(name__endswith=permissions[0]) + for permission in permissions[1:]: + permission_filter |= Q(name__endswith=permission) + + # Getting and assing the permissions to the group + permissions_to_add = self.environment_level_permissions.filter( + permission_filter + ) + group.permissions.add(*permissions_to_add) + + def create_default_smtp_integration(self): + # If a default SMTP exists, return it. 
If not, create 'Datacoves SMTP' + try: + integration = Integration.objects.get( + account=self.account, + type=Integration.INTEGRATION_TYPE_SMTP, + is_default=True, + ) + except Integration.DoesNotExist: + integration = Integration.objects.create( + name="Datacoves SMTP", + account=self.account, + type=Integration.INTEGRATION_TYPE_SMTP, + settings={ + "server": "datacoves", + "host": "", + "mail_from": "", + "port": 587, + "user": "", + "password": "", + "ssl": False, + "start_tls": True, + "webhook_url": "", + }, + is_default=True, + ) + environment_integration, _ = EnvironmentIntegration.objects.get_or_create( + environment=self, + integration=integration, + service=settings.SERVICE_AIRFLOW, + ) + + return environment_integration + + @staticmethod + def from_permission_names(permission_names): + """Returns a queryset for querying environments by permission name""" + + if not permission_names: + return [] + filters = Q() + for name in permission_names: + permission_data = parse_permission_name(name) + env_slug = permission_data.get("environment_slug") + project_slug = permission_data.get("project_slug") + account_slug = permission_data.get("account_slug") + if env_slug: + filters |= Q(slug=env_slug) + elif project_slug: + filters |= Q(project__slug=project_slug) + elif account_slug: + filters |= Q(project__account__slug=account_slug) + return Environment.objects.filter(filters) + + @property + def account(self): + """Returns the account object associated with this environment by + way of the project linkage""" + + return self.project.account + + @property + def final_services(self): + """Services defined in environment + updated by profile""" + + services = self.services.copy() + services.update( + { + settings.SERVICE_LOCAL_DBT_DOCS: { + "enabled": services["code-server"]["enabled"] + and self.profile.dbt_local_docs, + "valid": True, + }, + } + ) + + return services + + @property + def profile_flags(self) -> dict: + """Returns global and release profile files combined, from current release""" + + flags = self.release.profile_flags + if flags: + base = flags.get("global", {}) + base.update(flags.get(self.release_profile, {})) + return base + return {} + + def is_service_enabled(self, service_name): + """Checks if a services is enabled, including internal services""" + + options = self.final_services.get(service_name) + if not options: + return self.is_internal_service_enabled(service_name) + enabled = options.get("enabled", False) + assert isinstance(enabled, bool) + return enabled + + def is_service_valid(self, service_name): + """Checks if a services is enabled, including internal services""" + + service = self.final_services.get(service_name) + valid = service.get("valid", True) + assert isinstance(valid, bool) + return valid + + def is_service_enabled_and_valid(self, service_name): + """Is the given service enabled and valid; combines the + is_service_enabled and is_service_valid checks + """ + + return self.is_service_enabled(service_name) and self.is_service_valid( + service_name + ) + + def is_internal_service_enabled(self, service_name): + """is_service_enabled, but restricted to internal services""" + + options = self.internal_services.get(service_name) + if not options: + return False + enabled = options.get("enabled", False) + assert isinstance(enabled, bool) + return enabled + + def enabled_and_valid_services(self): + """Returns a set of services that are enabled and valid""" + + return { + service + for service in self.services + if self.is_service_enabled_and_valid(service) 
+ } + + def get_service_image( + self, service: str, repo: str, tag_prefix=None, include_registry=True + ): + """Gets the service docker image, for the given service and + repository + """ + + image, tag = self.release.get_service_image(service, repo, tag_prefix) + if include_registry and self.docker_registry: + image = f"{self.docker_registry}/{image}" + return image, tag + + def get_image(self, repo: str, use_release_profile=False): + """Gets an image for the given repository, optionally using the + profile image set + """ + + release_repo = f"{repo}-{self.release_profile}" if use_release_profile else repo + if self.profile.image_set: + image, tag = self.profile.image_set.get_image( + repo, self.docker_registry, release_repo + ) + else: + image, tag = self.release.get_image(release_repo) + if self.docker_registry: + image = f"{self.docker_registry}/{image}" + return image, tag + + def get_plan(self): + """Fetches the plan associated with this environment by way of the + project and account. + """ + + from billing.models import Plan + + plan: Plan = Plan.objects.filter(account__projects__environments=self).first() + return plan + + def get_quota(self): + """Get the combined quota, using the plan as the basis but applying + overrides from the environment level. + """ + + quota = None + if self.quotas: + quota = self.quotas + else: + plan = self.get_plan() + if plan: + quota = plan.environment_quotas + return quota + + @property + def dbt_profile(self): + """Gets the dbt_profile from the settings dictionary; or fetch + it from project settings if not set. Or return 'default' if neither + is set. + """ + + return self.settings.get( + "dbt_profile", self.project.settings.get("dbt_profile", "default") + ) + + def get_user_services(self, user: User) -> dict: + """Returns a list of user-level services available to a given user""" + + services = self.final_services + + user_services_enabled = [ + service_name + for service_name in settings.USER_SERVICES + if self.is_service_enabled_and_valid(service_name) + ] + + if user_services_enabled: + ue = ( + UserEnvironment.objects.filter(user=user, environment=self) + .only("services") + .first() + ) + + if ue: + for service_name in user_services_enabled: + services.get(service_name).update(ue.services.get(service_name, {})) + + return services + + @property + def k8s_namespace(self) -> str: + return f"{NAMESPACE_PREFIX}{self.slug}" diff --git a/src/core/api/app/projects/models/environment_integration.py b/src/core/api/app/projects/models/environment_integration.py new file mode 100644 index 00000000..ce031360 --- /dev/null +++ b/src/core/api/app/projects/models/environment_integration.py @@ -0,0 +1,72 @@ +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.core.exceptions import ValidationError +from django.db import models + + +class EnvironmentIntegration(AuditModelMixin, DatacovesModel): + """Links environments to supported integrations, such as Slack, etc. + + Provides the settings needed for the environment to use each integration. + The integrations are defined in :model:`integrations.Integration`. + + ========= + Constants + ========= + + - SERVICES - A list of tuple pairs for populating a select box. 
These + are populated from settings.SERVICES + + ======= + Methods + ======= + + - **clean()** - Private method to do validation + - **save(...)** - Overridden to support validation + """ + + SERVICES = [(service, service.title()) for service in sorted(settings.SERVICES)] + + environment = models.ForeignKey( + "Environment", on_delete=models.CASCADE, related_name="integrations" + ) + integration = models.ForeignKey( + "integrations.Integration", + on_delete=models.CASCADE, + related_name="environments", + ) + service = models.CharField(max_length=50, choices=SERVICES) + settings = models.JSONField( + default=dict, + blank=True, + null=True, + help_text="Specific configuration for the service that uses the integration", + ) + + def clean(self): + """Do validation to make sure the environemt and integration are + in the same account. Makes sure the selected integration type + is supported for the given service. Raises ValidationError if + there is a problem. + """ + + from clusters.adapters.all import get_supported_integrations + + if self.environment.project.account != self.integration.account: + raise ValidationError( + "Environment and Integration must belong to the same Account" + ) + if self.integration.type not in get_supported_integrations(self.service): + raise ValidationError( + f"'{self.integration.type}' integration type not supported by {self.service}" + ) + + def __str__(self): + return f"{self.environment}:{self.integration}" + + def save(self, *args, **kwargs): + """Does validation checks""" + + self.clean() + return super().save(*args, **kwargs) diff --git a/src/core/api/app/projects/models/profile.py b/src/core/api/app/projects/models/profile.py new file mode 100644 index 00000000..8f0bf70e --- /dev/null +++ b/src/core/api/app/projects/models/profile.py @@ -0,0 +1,524 @@ +from autoslug import AutoSlugField +from codegen.models import Template +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.core.exceptions import ValidationError +from django.db import models, transaction +from packaging import version +from packaging.requirements import InvalidRequirement, Requirement + +from lib.utils import get_pending_tasks + + +def profile_slug(instance): + if instance.account: + return f"{instance.name}-{instance.account.slug}" + else: + return instance.name + + +def profile_file_slug(instance): + return f"{instance.template}-{instance.mount_path}" + + +class Profile(AuditModelMixin, DatacovesModel): + """ + A Profile holds Environment configuration reusable across an account's + environments, mostly code_server configuration. + + Profiles can be either for the system or for the account. System profiles + have created_by = None. Profiles have files associated with them, and + can inherit from file lists from other profiles via the "files_from" + link. + + The files, in turn, are :model:`codegen.Template` templates. 
+
+    =======
+    Methods
+    =======
+
+    - **clean()** - Private method to do validation
+    - **save(...)** - Overridden save to do validation
+    """
+
+    name = models.CharField(max_length=32, unique=True)
+    slug = AutoSlugField(populate_from=profile_slug, unique=True)
+    account = models.ForeignKey(
+        "users.Account", on_delete=models.CASCADE, blank=True, null=True
+    )
+
+    dbt_sync = models.BooleanField(
+        default=True,
+        help_text="If enabled, dbt core interface gets installed as a requirement of"
+        " the datacoves power user extension",
+    )
+    dbt_local_docs = models.BooleanField(
+        default=True,
+        help_text="If enabled, a web server is launched to serve local dbt docs",
+    )
+    mount_ssl_keys = models.BooleanField(
+        default=True, help_text="When enabled, ssl keys are mounted under /config/.ssl/"
+    )
+    mount_ssh_keys = models.BooleanField(
+        default=True, help_text="When enabled, ssh keys are mounted under /config/.ssh/"
+    )
+    mount_api_token = models.BooleanField(
+        default=True,
+        help_text="If enabled, an api_token is mounted as an environment variable",
+    )
+    clone_repository = models.BooleanField(
+        default=True,
+        help_text="When enabled, the project git repository gets cloned automatically",
+    )
+    files_from = models.ForeignKey(
+        "self",
+        on_delete=models.SET_NULL,
+        blank=True,
+        null=True,
+        help_text="Profile used as starting point for files configuration. "
+        "Files added to current profile are appended to the base profile files list.",
+    )
+    created_by = models.ForeignKey(
+        settings.AUTH_USER_MODEL,
+        on_delete=models.SET_NULL,
+        related_name="created_profiles",
+        blank=True,
+        null=True,
+        help_text="If created_by is null, it is a system profile",
+    )
+    updated_by = models.ForeignKey(
+        settings.AUTH_USER_MODEL,
+        on_delete=models.SET_NULL,
+        related_name="updated_profiles",
+        blank=True,
+        null=True,
+    )
+
+    def __str__(self):
+        return self.name
+
+    @property
+    def image_set(self):
+        """The last image set ready to be used"""
+        return self.image_sets.exclude(images={}).order_by("-id").first()
+
+    @property
+    def latest_image_set(self):
+        """The last image set, even if it is not yet ready to be used"""
+        return self.image_sets.order_by("-id").first()
+
+    @property
+    def is_system_profile(self) -> bool:
+        """Is this a system profile?"""
+
+        return self.created_by is None
+
+    def clean(self):
+        """Validates that system profiles are not modified. Also makes
+        sure that the 'files_from' profile is in the same account as
+        this profile (or the files_from profile has no account set).
+
+        Raises ValidationError if there is a problem.
+        """
+
+        if self.pk and self.is_system_profile and self.updated_by:
+            raise ValidationError("Profiles created by system can not be modified.")
+        if (
+            self.account
+            and self.files_from
+            and self.files_from.account
+            and self.files_from.account != self.account
+        ):
+            raise ValidationError(
+                "Base profile must belong to the same profile Account"
+            )
+
+    def save(self, *args, **kwargs):
+        """Overridden to provide validation via clean()"""
+
+        self.clean()
+        super().save(*args, **kwargs)
+
+
+class ProfileImageSet(DatacovesModel):
+    """
+    A set of docker images, python libraries, and other dependencies
+    specific to a release for use in profiles.
+
+    This is a way to customize environments to specific customer needs.
+    For this to function, it is required that at least one of the
+    build_* booleans is set to True.
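+
+    Illustrative sketch (the requirement pin is just the example used in the
+    field help texts; ``profile`` and ``release`` are assumed to already
+    exist)::
+
+        ProfileImageSet.objects.create(
+            profile=profile,
+            release=release,
+            python_requirements=["Django==5.0.7"],
+            build_airflow=True,
+        )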
+ + ========= + Constants + ========= + + - BASE_IMAGES - a map of services to default/base Docker images + + ======= + Methods + ======= + + - **clean()** - Private method to handle validation + - **save(...)** - Overridden to support validation + - **images_without_registry(registry)** - returns a dictionary mapping + image name without registry prefix to tags + - **get_image(repo, docker_registry, release_repo)** - Returns a + tuple of (image, tag) + - **set_image_status(image_tag, status)** - Sets the image status for + a given image tag in the images_status dictionary + - **set_images_if_built()** - Sets the 'images' field to all build + images according to 'images_status' and triggers a workspace sync. + - **_trigger_workspace_sync()** - Triggers the sync process for all + environments using the profile + - **is_compatible(release)** - Returns if the passed releease is + compatible with the ProfileImageSet's release. + """ + + IMAGE_STATUS_BUILT = "built" + IMAGE_STATUS_BUILT_ERROR = "build_error" + + BASE_IMAGES = { + "code_server": "datacovesprivate/code-server-code-server-base", + "dbt_core_interface": "datacovesprivate/code-server-dbt-core-interface-base", + "airflow": "datacovesprivate/airflow-airflow-base", + "ci_basic": "datacoves/ci-basic-base", + "ci_airflow": "datacoves/ci-airflow-base", + } + + profile = models.ForeignKey( + Profile, on_delete=models.CASCADE, related_name="image_sets" + ) + + ## Build inputs, set these before building. + release = models.ForeignKey( + "Release", + on_delete=models.PROTECT, + help_text="Release that contains images from where new images will be based.", + ) + # https://pip.pypa.io/en/stable/reference/requirements-file-format/ + python_requirements = models.JSONField( + default=list, + null=True, + blank=True, + help_text="List of python libs to be used in both airflow and code server" + ' images, e.g. ["Django==5.0.7"]', + ) + airflow_requirements = models.JSONField( + default=list, + null=True, + blank=True, + help_text='List of python libs to be used in airflow images, e.g. ["Django==5.0.7"]', + ) + code_server_requirements = models.JSONField( + default=list, + null=True, + blank=True, + help_text='List of python libs to be used in code server images, e.g. ["Django==5.0.7"]', + ) + ci_requirements = models.JSONField( + default=list, + null=True, + blank=True, + help_text='List of python libs to be used in ci images, e.g. 
["Django==5.0.7"]', + ) + code_server_extensions = models.JSONField( + default=list, + null=True, + blank=True, + help_text="List of urls to vscode extensions that will be downloaded, unzipped and installed.", + ) + build_code_server = models.BooleanField( + default=True, + help_text="If True, the build_profile_image_set task will build this " + "docker image using requirements specified in the profile image set.", + ) + build_dbt_core_interface = models.BooleanField( + default=True, + help_text="If True, the build_profile_image_set task will build this " + "docker image using requirements specified in the profile image set.", + ) + build_airflow = models.BooleanField( + default=False, + help_text="If True, the build_profile_image_set task will build this " + "docker image using requirements specified in the profile image set.", + ) + build_ci_basic = models.BooleanField( + default=False, + help_text="If True, the build_profile_image_set task will build this " + "docker image using requirements specified in the profile image set.", + ) + build_ci_airflow = models.BooleanField( + default=False, + help_text="If True, the build_profile_image_set task will build this " + "docker image using requirements specified in the profile image set.", + ) + + ## Build state. + images_status = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="A dictionary mapping docker image names to their build status.", + ) + + ## Built images. Keep in mind that custom docker registries are prefixed to image repos + images = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="A dictionary mapping docker image names to tags (versions)." + " if empty, it means the build process didn't complete", + ) + + images_logs = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="Kubernetes logs", + ) + + def __str__(self): + return f"{self.profile.name}:{self.id}" + + def clean(self): + """Do validation on various fields. Python requirement fields will + be validated to make sure they are in a correct format. Also, + at least one of the build_* booleans must be True. Throws a + ValidationError if there is a problem. + """ + + def validate_reqs(reqs): + for req in reqs: + try: + parsed_req = Requirement(req) + except InvalidRequirement: + raise ValidationError( + f"Could not parse requirement: {req}. " + "Remember to prefix with 'name@' when using git urls." 
+ ) + if not parsed_req.name: + raise ValidationError(f"Missing name in requirement {req}") + + validate_reqs(self.python_requirements) + validate_reqs(self.code_server_requirements) + validate_reqs(self.airflow_requirements) + if ( + not self.build_code_server + and not self.build_airflow + and not self.build_ci_basic + and not self.build_ci_airflow + and not self.build_dbt_core_interface + ): + raise ValidationError("Please select at least one image to build") + + def save(self, *args, **kwargs): + """Overridden save to support validation fvia clean()""" + + self.clean() + return super().save(*args, **kwargs) + + def images_without_registry(self, registry): + """Returns a dictionary mapping image name without registry prefix + to tags""" + + # Removing docker registry from image repo names + return ( + {name.replace(f"{registry}/", ""): tag for name, tag in self.images.items()} + if registry + else self.images + ) + + def get_image(self, repo: str, docker_registry: str, release_repo: str): + """Returns an image tuple repo, tag as the Release model does""" + path, name = repo.rsplit("/", 1) + image = f"{path}/pi{self.id}-{name}" + tag = self.images_without_registry(docker_registry).get(image) + if tag: + return image, tag + # try with a base image (using release profiles based image) + image += "-base" + tag = self.images_without_registry(docker_registry).get(image) + if tag: + return image, tag + return self.release.get_image(release_repo) + + def set_image_status(self, image_tag: str, status: str, logs: str = ""): + """Sets the image status for a given image tag in the images_status + dictionary""" + + with transaction.atomic(durable=True): + image_set = ( + ProfileImageSet.objects.select_for_update() + .only("images_status", "images_logs") + .filter(id=self.id) + .first() + ) + if image_set is None or image_tag not in image_set.images_status: + return False + + if image_set.images_logs is None: + image_set.images_logs = {} + + image_set.images_status[image_tag] = status + image_set.images_logs[image_tag] = logs + image_set.save(update_fields=["images_status", "images_logs"]) + return True + + def set_images_if_built(self) -> bool: + """Sets the 'images' field to all build images according to + 'images_status' and triggers a workspace sync. 
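+
+        For illustration only (the image name and tag are hypothetical), an
+        images_status of ``{"datacovesprivate/pi7-code-server-base:3.1.0": "built"}``
+        would result in ``images == {"datacovesprivate/pi7-code-server-base": "3.1.0"}``.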
+ """ + + images = {} + for image, status in self.images_status.items(): + if status != ProfileImageSet.IMAGE_STATUS_BUILT: + return False + name, tag = image.split(":") + images[name] = tag + self.images = images + self.save(update_fields=["images"]) + self._trigger_workspace_sync() + return True + + def clean_images_logs(self): + if self.images_logs is None: + return + + images_logs = {} + for image_logs, logs in self.images_logs.items(): + image_status = self.images_status.get(image_logs) + if image_status: + images_logs[image_logs] = logs + + self.images_logs = images_logs + self.save(update_fields=["images_logs"]) + + def _trigger_workspace_sync(self): + """Triggers the sync process for all environments using the profile""" + + from clusters import workspace + + pending_tasks = get_pending_tasks("clusters.workspace.sync_task") + + for env in self.profile.environments.all(): + workspace.sync( + env, + "profile.ProfileImageSet._trigger_workspace_sync", + pending_tasks=pending_tasks, + ) + + def is_compatible(self, release) -> bool: + """Returns if released passed is compatible with PIS release""" + if self.release.name == release.name: + return True + + current_release_version = version.parse(self.release.name) + new_release_version = version.parse(release.name) + if ( + current_release_version.major == new_release_version.major + and current_release_version.minor == new_release_version.minor + ): + return True + + try: + for name, repo in self.BASE_IMAGES.items(): + builds = getattr(self, f"build_{name}") + if builds and self.release.get_image(repo) != release.get_image(repo): + return False + except KeyError: + return False + return True + + +class ProfileFile(DatacovesModel): + """Files generated by a template that are mounted on specific location on + code server + + Only the following template contexts are supported: + + - Template.CONTEXT_TYPE_USER_CREDENTIALS, + - Template.CONTEXT_TYPE_NONE, + - Template.CONTEXT_TYPE_USER, + - Template.CONTEXT_TYPE_ENVIRONMENT, + + ======= + Methods + ======= + + - **clean()** - Private function to do validation + - **save(...)** - Overidden to support validation + """ + + PERMISSION_644 = "0o644" + PERMISSION_744 = "0o744" + PERMISSION_600 = "0o600" + PERMISSION_700 = "0o700" + + PERMISSION_CHOICES = [ + (PERMISSION_644, "644"), + (PERMISSION_744, "744"), + (PERMISSION_600, "600"), + (PERMISSION_700, "700"), + ] + + slug = AutoSlugField(populate_from=profile_file_slug) + mount_path = models.CharField(max_length=250, help_text="Path for the file") + profile = models.ForeignKey(Profile, on_delete=models.CASCADE, related_name="files") + template = models.ForeignKey( + Template, on_delete=models.CASCADE, related_name="profile_files" + ) + override_existent = models.BooleanField( + default=True, + help_text="When enabled, if a file is found, it will be overwritten.", + ) + execute = models.BooleanField( + default=False, + help_text="Specifies if file should be executed, requires shebang set " + "on file. 
If this is set, override_existent will be forced to True.", + ) + # Right now, 644 is the default, set to 744 for executable files in 60-profile-files.py task + permissions = models.CharField( + max_length=5, + choices=PERMISSION_CHOICES, + default=PERMISSION_644, + help_text="File permissions", + ) + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["profile", "slug"], + name="Profile file slug uniqueness", + ) + ] + + def __str__(self): + return self.slug + + def clean(self): + """Checks to make sure the context is valid""" + + valid_contexts = ( + Template.CONTEXT_TYPE_USER_CREDENTIALS, + Template.CONTEXT_TYPE_NONE, + Template.CONTEXT_TYPE_USER, + Template.CONTEXT_TYPE_ENVIRONMENT, + ) + + if self.template: + self.template.is_enabled_for(__class__.__name__) + if self.template.context_type not in valid_contexts: + raise ValidationError( + f"You must select a template with a valid context: {str(valid_contexts)}" + ) + + def save(self, *args, **kwargs): + """Runs validation, and also forces override_existent to True if + execute is set.""" + + if self.execute: + self.override_existent = True + self.clean() + return super().save(*args, **kwargs) diff --git a/src/core/api/app/projects/models/project.py b/src/core/api/app/projects/models/project.py new file mode 100644 index 00000000..6cc57dcd --- /dev/null +++ b/src/core/api/app/projects/models/project.py @@ -0,0 +1,589 @@ +import datetime +import uuid + +from autoslug import AutoSlugField +from core.fields import EncryptedJSONField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from credentials.models import Secret +from django.conf import settings +from django.contrib.auth.models import ContentType, Group, Permission +from django.core.exceptions import ValidationError +from django.db import models +from django.db.models import Q +from users.models import ( + ExtendedGroup, + User, + make_permission_name, + parse_permission_name, +) + +from ..azure import AzureDevops + +MAX_SLUG_LENGTH = 30 + + +def project_slug(instance): + return f"{instance.name}-{instance.account.slug}"[:MAX_SLUG_LENGTH] + + +class Project(AuditModelMixin, DatacovesModel): + """Projects are the critical glue between environments, accounts, and + other key system components + + Note that permissions can be project level or environment level. + + ========= + Constants + ========= + + ---------------- + Clone Strategies + ---------------- + + - SSH_CLONE_STRATEGY + - HTTP_CLONE_STRATEGY + - CLONE_STRATEGIES - Tuple of tuple pairs for select box population + + How do we clone GIT, through SSH or HTTP? + + ------------ + CI Providers + ------------ + + - CI_PROVIDER_GITHUB + - CI_PROVIDER_GITLAB + - CI_PROVIDER_BAMBOO + - CI_PROVIDER_JENKINS + - CI_PROVIDER_CIRCLECI + - CI_PROVIDER_OTHER + - CI_PROVIDERS - tuple of tuple pairs for select box population + + ======= + Methods + ======= + + - **clean()** - Private method to perform validation on the Project + - **save(...)** - Overrides save to provide validation + - **user_has_access(user)** - Does the given user have access to this + project? 
+ - **create_permissions()** - Used by a post-save hook to create + necessary permissions for newly created projects + - **create_project_groups()** - Used by a post-save hook to create + necessary groups for newly created projects + """ + + SSH_CLONE_STRATEGY = "ssh_clone" + HTTP_CLONE_STRATEGY = "http_clone" + AZURE_SECRET_CLONE_STRATEGY = "azure_secret_clone" + AZURE_CERTIFICATE_CLONE_STRATEGY = "azure_certificate_clone" + + CLONE_STRATEGIES = ( + ( + SSH_CLONE_STRATEGY, + "SSH git clone", + ), + ( + HTTP_CLONE_STRATEGY, + "HTTP git clone", + ), + ( + AZURE_SECRET_CLONE_STRATEGY, + "Azure Secret clone", + ), + ( + AZURE_CERTIFICATE_CLONE_STRATEGY, + "Azure Certificate clone", + ), + ) + + CI_PROVIDER_GITHUB = "github" + CI_PROVIDER_GITLAB = "gitlab" + CI_PROVIDER_BAMBOO = "bamboo" + CI_PROVIDER_JENKINS = "jenkins" + CI_PROVIDER_CIRCLECI = "circleci" + CI_PROVIDER_OTHER = "other" + CI_PROVIDER_AZURE_DEVOPS = "azure_devops" + CI_PROVIDERS = ( + ( + CI_PROVIDER_GITHUB, + "GitHub", + ), + ( + CI_PROVIDER_GITLAB, + "Gitlab", + ), + ( + CI_PROVIDER_BAMBOO, + "Bamboo", + ), + ( + CI_PROVIDER_JENKINS, + "Jenkins", + ), + ( + CI_PROVIDER_CIRCLECI, + "CircleCI", + ), + ( + CI_PROVIDER_OTHER, + "Other", + ), + ( + CI_PROVIDER_AZURE_DEVOPS, + "Azure DevOps", + ), + ) + + name = models.CharField(max_length=50) + slug = AutoSlugField(populate_from=project_slug, unique=True) + account = models.ForeignKey( + "users.Account", on_delete=models.CASCADE, related_name="projects" + ) + repository = models.ForeignKey( + "Repository", + on_delete=models.CASCADE, + help_text="GIT Repository to use for this project", + ) + release_branch = models.CharField( + max_length=130, + default="main", + help_text="Which branch is used for releases in the GIT repository", + ) + clone_strategy = models.CharField( + max_length=60, choices=CLONE_STRATEGIES, default=SSH_CLONE_STRATEGY + ) + # not null if clone_strategy == ssh_clone + deploy_key = models.ForeignKey( + "SSHKey", + on_delete=models.SET_NULL, + null=True, + blank=True, + related_name="projects", + help_text="Required for SSH clone strategy", + ) + azure_deploy_key = models.ForeignKey( + "SSLKey", + on_delete=models.SET_NULL, + null=True, + blank=True, + related_name="projects", + help_text="Required for Azure certificate clone strategy", + ) + # not null if clone_strategy == http_clone + deploy_credentials = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="Required for HTTP clone stategy. This will be a JSON " + "dictionary with keys 'git_username' and 'git_password'. This is " + "also used by the Azure deployments to provide azure_tenant and " + "oauth credentials", + ) + ci_home_url = models.URLField( + max_length=250, + blank=True, + null=True, + help_text="Base URL for CI, if CI is being used.", + ) + ci_provider = models.CharField( + max_length=50, blank=True, null=True, choices=CI_PROVIDERS + ) + validated_at = models.DateTimeField( + blank=True, + null=True, + help_text="Projects must be validated for services to run. This " + "is usually set by the system.", + ) + settings = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="Settings propagated to all environment settings. 
" + "Avoid reading this field, instead, read environment settings.", + ) + variables = EncryptedJSONField( + default=dict, + null=True, + blank=True, + help_text="Environment variables used across the entire project", + ) + secrets_backend = models.CharField( + max_length=50, + choices=Secret.SECRETS_BACKENDS, + default=Secret.SECRETS_BACKEND_DATACOVES, + help_text="Secrets backend used to store/read secrets managed via admin.", + ) + secrets_backend_config = EncryptedJSONField( + default=dict, + null=True, + blank=True, + help_text="Configuration needed to connect to chosen secrets backend", + ) + secrets_secondary_backend = models.CharField( + max_length=512, + null=True, + blank=True, + help_text="This is an Airflow class 'dot path' to enable the use " + "of a secondary secret backend if the Datacoves Secret Backend is " + "in use.", + ) + secrets_secondary_backend_config = EncryptedJSONField( + null=True, + blank=True, + help_text="When a secondary backend is chosen, this is the Airflow " + "configuration block for the backend. It should be a bunch of " + "key=value pairs.", + ) + + release_branch_protected = models.BooleanField(default=True) + + uid = models.UUIDField( + default=uuid.uuid4, + editable=False, + unique=True, + help_text="For dynamic authentication, we need to have a unique ID " + "to reference this project that isn't sequential as a security " + "token. Since this is internal only, we can restrict access to " + "inside the Kubernetes cluster only, anad this is not accessible " + "to end users so it should be sufficiently secure.", + ) + + @property + def public_azure_key(self): + """The public Azure SSL key if set, None if not set""" + + if self.azure_deploy_key: + return self.azure_deploy_key.public + return None + + @property + def public_ssh_key(self): + """The public SSH key if set, None if not set""" + + if self.deploy_key: + return self.deploy_key.public + return None + + @property + def public_ssh_key_type(self): + """The public SSH key type if set, None if not set""" + + if self.deploy_key: + return self.deploy_key.key_type + return None + + def __str__(self): + return self.slug + + def clean(self): + """Do validation; this checks to make sure the necessary credential + field is set for the given clone_stategy. Raises ValidationError + if there is an issue + """ + + if self.clone_strategy == self.HTTP_CLONE_STRATEGY and not self.repository.url: + raise ValidationError( + "Repository.url is required when clone strategy is HTTP." 
+ ) + if self.clone_strategy == self.SSH_CLONE_STRATEGY and not self.deploy_key: + raise ValidationError("Deploy key is required when clone strategy is SSH.") + + def save(self, *args, **kwargs): + """Wraps save in order to implement validation""" + + self.clean() + return super().save(*args, **kwargs) + + def user_has_access(self, user): + """Does the given user have access to this project?""" + + return any( + [ + perm.name.startswith(f"{self.account.slug}:{self.slug}") + for perm in user.get_account_permissions(self.account).all() + ] + ) + + def create_permissions(self): + """Used by a post-save hook to create necessary permissions for + newly created projects""" + + content_type = ContentType.objects.get(app_label="users", model="account") + for resource in settings.WORKBENCH_RESOURCES: + for action in (settings.ACTION_READ, settings.ACTION_WRITE): + name = make_permission_name( + resource, + action, + account_slug=self.account.slug, + project_slug=self.slug, + ) + + Permission.objects.get_or_create( + name=name, + content_type=content_type, + defaults={"codename": name[:100]}, + ) + + # Create the service user if necessary and make a system token for + # it. + if "system_api_key" not in self.settings: + from iam.models import DatacovesToken + + sa_user = self.setup_service_account() + instance, token = DatacovesToken.objects.create( + user=sa_user, + expiry=None, + prefix="", + type=DatacovesToken.TYPE_PROJECT, + project=self, + is_system=True, + ) + + self.settings["system_api_key"] = token + + # Avoid save signal loops, because this is called from a signal. + Project.objects.filter(id=self.id).update(settings=self.settings) + + @property + def project_level_permissions(self): + """Returns a queryset of Permission objects associated with this + project + """ + + return Permission.objects.filter( + Q(name__startswith=f"{self.account.slug}:{self.slug}|") + | Q(name__startswith=f"{self.account.slug}|services:") + ) + + @property + def groups(self): + """Returns a queryset of Group objects associated with this + project + """ + + return Group.objects.filter( + permissions__in=self.project_level_permissions + ).distinct() + + @staticmethod + def from_permission_names(permission_names): + """Returns a queryset of Project objects based on the given + permission names list""" + + if not permission_names: + return [] + filters = Q() + for name in permission_names: + permission_data = parse_permission_name(name) + env_slug = permission_data.get("environment_slug") + project_slug = permission_data.get("project_slug") + account_slug = permission_data.get("account_slug") + if env_slug: + filters |= Q(environments__slug=env_slug) + elif project_slug: + filters |= Q(slug=project_slug) + elif account_slug: + filters |= Q(account__slug=account_slug) + return Project.objects.filter(filters) + + @property + def roles_and_permissions(self) -> list: + permissions_for_viewers = [ + f"workbench:{settings.SERVICE_DBT_DOCS}|{settings.ACTION_READ}", + f"workbench:{settings.SERVICE_SUPERSET}|{settings.ACTION_READ}", + f"workbench:{settings.SERVICE_AIRFLOW}|{settings.ACTION_READ}", + f"{settings.SERVICE_DATAHUB_DATA}|{settings.ACTION_READ}", + f"services:{settings.INTERNAL_SERVICE_GRAFANA}:dashboards|{settings.ACTION_READ}", + ] + + permissions_for_developers = [ + f"workbench:{settings.SERVICE_DBT_DOCS}|{settings.ACTION_READ}", + f"workbench:{settings.SERVICE_SUPERSET}|{settings.ACTION_READ}", + f"{settings.SERVICE_SUPERSET_DATA_SOURCES}|{settings.ACTION_WRITE}", + 
f"workbench:{settings.SERVICE_AIRFLOW}|{settings.ACTION_READ}", + f"workbench:{settings.SERVICE_AIRBYTE}|{settings.ACTION_READ}", + f"workbench:{settings.SERVICE_CODE_SERVER}|{settings.ACTION_WRITE}", + f"workbench:{settings.SERVICE_LOCAL_DBT_DOCS}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_DATAHUB_DATA}|{settings.ACTION_WRITE}", + f"services:{settings.INTERNAL_SERVICE_GRAFANA}:dashboards|{settings.ACTION_READ}", + ] + + permissions_for_sysadmins = [ + f"{settings.SERVICE_SUPERSET_DATA_SOURCES}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_AIRFLOW_SYS_ADMIN}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_AIRFLOW_DAGS}|{settings.ACTION_WRITE}", + f"workbench:{settings.SERVICE_AIRBYTE}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_DATAHUB_DATA}|{settings.ACTION_WRITE}", + f"services:{settings.INTERNAL_SERVICE_GRAFANA}:dashboards|{settings.ACTION_READ}", + ] + + permissions_for_admins = [ + f"{settings.SERVICE_SUPERSET_SECURITY}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_AIRFLOW_ADMIN}|{settings.ACTION_WRITE}", + f"workbench:{settings.SERVICE_AIRBYTE}|{settings.ACTION_WRITE}", + f"{settings.SERVICE_DATAHUB_ADMIN}|{settings.ACTION_WRITE}", + f"services:{settings.INTERNAL_SERVICE_GRAFANA}:dashboards|{settings.ACTION_WRITE}", + ] + + roles_and_permissions = [ + ( + ExtendedGroup.Role.ROLE_PROJECT_DEVELOPER, + permissions_for_developers, + "project developers", + ), + ( + ExtendedGroup.Role.ROLE_PROJECT_VIEWER, + permissions_for_viewers, + "project viewers", + ), + ( + ExtendedGroup.Role.ROLE_PROJECT_SYSADMIN, + permissions_for_sysadmins, + "project sys admins", + ), + ( + ExtendedGroup.Role.ROLE_PROJECT_ADMIN, + permissions_for_admins, + "project admins", + ), + ] + + return roles_and_permissions + + def create_project_groups(self, force_update=False): + """Used by a post-save hook to create necessary groups for newly created projects""" + + for role, permissions, group_name_suffix in self.roles_and_permissions: + existing_group = ExtendedGroup.objects.filter( + role=role, project=self + ).first() + + if not existing_group or force_update: + # Create the group if does not exist + group, _ = Group.objects.get_or_create( + name=f"'{self.slug}' {group_name_suffix}" + ) + ExtendedGroup.objects.get_or_create( + group=group, + role=role, + account=self.account, + project=self, + ) + + if force_update: + group.permissions.clear() + + # Building the filter to permissions dinamically + permission_filter = Q(name__endswith=permissions[0]) + for permission in permissions[1:]: + permission_filter |= Q(name__endswith=permission) + + # Getting and assing the permissions to the group + permissions_to_add = self.project_level_permissions.filter( + permission_filter + ) + group.permissions.add(*permissions_to_add) + + def update_oauth_if_needed(self, force: bool = False): + """ + This will update the oauth credentials for this project, if + it is necessary to do so. You can pass force to True to make sure + the update happens. If this project isn't using a clone strategy + that requires oauth (i.e. one of the Azures as of the time of this + writing) it won't do anything. + + This can raise an exception if there is an error in Azure. + """ + + if self.clone_strategy not in ( + self.AZURE_SECRET_CLONE_STRATEGY, + self.AZURE_CERTIFICATE_CLONE_STRATEGY, + ): + return + + # Do we need to fetch a new access token? + if force: + expires_at = None + else: + expires_at = self.deploy_credentials.get("expires_at") + + # Current timestamp, minus a minute to give some headroom + # for expiration. 
+ now = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta( + seconds=60 + ) + + if expires_at: + # Convert to datetime + expires_at = datetime.datetime.strptime( + f"{expires_at} +0000", + "%Y-%m-%d %H:%M:%S.%f %z", + ) + + else: + # Expire now + expires_at = now + + # If we're not expired, and we have credentials, then we can + # short circuit right here. + if ( + expires_at > now + and "oauth_username" in self.deploy_credentials + and "oauth_password" in self.deploy_credentials + ): + return + + if self.clone_strategy == self.AZURE_SECRET_CLONE_STRATEGY: + az = AzureDevops( + self.deploy_credentials.get("azure_tenant", ""), + self.deploy_credentials.get("git_username", ""), + self.deploy_credentials.get("git_password", ""), + ) + + else: + az = AzureDevops( + self.deploy_credentials.get("azure_tenant", ""), + self.deploy_credentials.get("git_username", ""), + None, + self.azure_deploy_key.public + "\n" + self.azure_deploy_key.private, + ) + + oauth_creds = az.get_access_token() + + self.deploy_credentials["oauth_username"] = oauth_creds["accessToken"] + self.deploy_credentials["oauth_password"] = "" + self.deploy_credentials["expires_at"] = oauth_creds["expiresOn"] + + self.save() + + def setup_service_account(self) -> User: + """Create, or return if it already exists, a service account for this + project. + """ + + email = f"project-{self.slug}@{settings.BASE_DOMAIN}" + + sa_user, _ = User.objects.get_or_create( + email=email, + defaults={ + "is_service_account": True, + "name": f"Project {self.slug} Service Account", + }, + ) + + # Set permissions + # + # Because this is project level, we have to do it with the group + # instead of individual permissions as is done with the environment + # level folks. + + group = ExtendedGroup.objects.filter( + project=self, name__contains="Project Admin" + ).first() + + # This doesn't exist on test + if group is not None: + sa_user.groups.add(group.group) + + return sa_user diff --git a/src/core/api/app/projects/models/release.py b/src/core/api/app/projects/models/release.py new file mode 100644 index 00000000..7b960384 --- /dev/null +++ b/src/core/api/app/projects/models/release.py @@ -0,0 +1,238 @@ +from clusters.adapters.airbyte import AirbyteAdapter +from clusters.adapters.airflow import AirflowAdapter +from clusters.adapters.superset import SupersetAdapter +from clusters.models import Cluster +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.db import models + + +class ReleaseManager(models.Manager): + def get_latests(self, prefix=""): + cluster = Cluster.objects.current().only("provider").first() + qs = self.get_queryset() + if prefix: + qs = qs.filter(name__startswith=prefix) + if cluster and not cluster.is_local: + qs = qs.exclude(name__startswith="pre") + return qs.order_by("-name") + + def get_latest(self, prefix=""): + return self.get_latests(prefix=prefix).first() + + +class Release(AuditModelMixin, DatacovesModel): + """A Release specifies a set of docker images to be used in an Environment. + + It uses a custom ReleaseManager which provides a 'get_latest' and + 'get_latests' which return the latest release or the relases in + descending order of newness respectively. 
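For illustration, a minimal standalone sketch of the token-expiry check performed by `Project.update_oauth_if_needed()` above: the stored `expires_at` string is parsed as UTC and compared against "now minus 60 seconds" so credentials are refreshed slightly before they actually lapse. The helper name `needs_refresh` and the sample timestamp are invented for this sketch; only the timestamp format mirrors the model code.

```python
import datetime


def needs_refresh(expires_at_str, force=False):
    # "now" minus 60 seconds of headroom, as in the model logic above.
    now = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(seconds=60)
    if force or not expires_at_str:
        return True
    expires_at = datetime.datetime.strptime(
        f"{expires_at_str} +0000",
        "%Y-%m-%d %H:%M:%S.%f %z",
    )
    return expires_at <= now


# An already-expired sample timestamp forces a refresh.
print(needs_refresh("2020-01-01 00:00:00.000000"))  # True
```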
+ + ======= + Methods + ======= + + - **get_service_image(service, repo, tag_prefix=None)** + Returns a service image tuple repo, tag + - **get_image(repo)** - Returns an image tuple repo, tag + - **_get_image_core(repo)** - Get the core image for 'repo' without the + tag or None if the image could not be found. + - **is_supported(env)** - Returns True if a given environment's services + are supported by this release. + """ + + name = models.CharField(max_length=32, unique=True) + notes = models.TextField(null=True, blank=True, help_text="Release notes") + commit = models.CharField(max_length=100, help_text="GIT Commit Hash") + + released_at = models.DateTimeField() + + images = models.JSONField( + default=dict, + help_text="A dictionary mapping docker image names to tags (versions).", + ) + + airbyte_images = models.JSONField( + default=list, + help_text="A list of docker image names and tags required by `images`", + ) + airbyte_chart = models.JSONField( + default=dict, help_text="Helm Chart details for this service" + ) + airflow_images = models.JSONField( + default=list, + help_text="A list of docker image names and tags required by `images`", + ) + airflow_chart = models.JSONField( + default=dict, help_text="Helm Chart details for this service" + ) + minio_chart = models.JSONField( + default=dict, help_text="Helm Chart details for this service" + ) + superset_images = models.JSONField( + default=list, + help_text="A list of docker image names and tags required by `images`", + ) + superset_chart = models.JSONField( + default=dict, help_text="Helm Chart details for this service" + ) + elastic_chart = models.JSONField( + default=dict, help_text="Helm Chart details for this service" + ) + elastic_images = models.JSONField( + default=list, + help_text="A list of docker image names and tags required by `images`", + ) + neo4j_chart = models.JSONField( + default=dict, help_text="Helm Chart details for this service" + ) + neo4j_images = models.JSONField( + default=list, + help_text="A list of docker image names and tags required by `images`", + ) + postgresql_chart = models.JSONField( + default=dict, help_text="Helm Chart details for this service" + ) + postgresql_images = models.JSONField( + default=list, + help_text="A list of docker image names and tags required by `images`", + ) + kafka_chart = models.JSONField( + default=dict, help_text="Helm Chart details for this service" + ) + kafka_images = models.JSONField( + default=list, + help_text="A list of docker image names and tags required by `images`", + ) + datahub_chart = models.JSONField( + default=dict, help_text="Helm Chart details for this service" + ) + datahub_images = models.JSONField( + default=list, + help_text="A list of docker image names and tags required by `images`", + ) + promtail_chart = models.JSONField( + default=dict, help_text="Helm Chart details for this service" + ) + ci_images = models.JSONField( + default=list, help_text="Dictionary mapping image names to tags for CI images" + ) + observability_images = models.JSONField( + default=list, + help_text="A list of docker image names and tags required by `images`", + ) + core_images = models.JSONField( + default=list, + help_text="A list of docker image names and tags required by `images`", + ) + deprecated = models.JSONField( + default=dict, + help_text="Dictionary mapping 'charts' and 'deployments' to list " + "of deprecated images.", + ) + + airbyte_version = models.CharField(max_length=32, default="") + airflow_version = models.CharField(max_length=32, default="") + 
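A rough, self-contained sketch of the selection logic in the custom `ReleaseManager.get_latests()` / `get_latest()` described above, using a plain list in place of the queryset: names are filtered by prefix, "pre" releases are excluded on non-local clusters, and `order_by("-name")` is effectively a descending string sort. The release names below are invented examples.

```python
def get_latest(names, prefix="", is_local_cluster=False):
    candidates = [n for n in names if n.startswith(prefix)]
    if not is_local_cluster:
        candidates = [n for n in candidates if not n.startswith("pre")]
    return sorted(candidates, reverse=True)[0] if candidates else None


names = ["3.1.0", "3.0.2", "pre-3.2.0", "2.9.9"]
print(get_latest(names))              # "3.1.0"  (pre release skipped)
print(get_latest(names, prefix="2"))  # "2.9.9"
```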
airflow_providers = models.JSONField( + default=dict, + help_text="Airflow library providers dictionary.", + ) + code_server_version = models.CharField(max_length=32, default="") + dbt_version = models.CharField(max_length=32, default="") + superset_version = models.CharField(max_length=32, default="") + + code_server_libraries = models.JSONField( + default=dict, + help_text="Dictionary of python library names to versions which will " + "be installed on code server by default, unless overridden by a " + "profile image set.", + ) + code_server_extensions = models.JSONField( + default=dict, + help_text="Dictionary of VS Code extension names to versions which " + "will be installed on a code server by default, unless overridden by " + "a profile image set.", + ) + + profile_flags = models.JSONField( + default=dict, + help_text="Dictionary mapping environment profiles to dictionaries " + "of flags.", + ) + + channels = models.JSONField(default=list) + + objects = ReleaseManager() + + def __str__(self): + return self.name + + @property + def is_pre(self) -> bool: + """Returns True if this is a pre release""" + + # FIXME: Deprecate releases starting with draft + return self.name.startswith("pre") or self.name.startswith("draft") + + @property + def version_components(self): + """Splits the version into a 3 way tuple of major, minor, patch + If it is a pre release, this returns None.""" + + if self.is_pre: + return self.name.split("-") + return self.name.split(".") + + def get_service_image(self, service: str, repo: str, tag_prefix=None): + """Returns a service image tuple repo, tag""" + for image in getattr(self, f"{service}_images"): + image, tag = image.split(":") + if repo == image and (not tag_prefix or tag.startswith(tag_prefix)): + return image, tag + raise KeyError(f"Repo {repo} not found in release '{self}' {service} images") + + def get_image(self, repo: str): + """Returns an image tuple repo, tag""" + tag = self.images.get(repo) + if tag: + return repo, tag + tag = self.ci_images.get(repo) + if tag: + return repo, tag + tag = self._get_image_core(repo=repo) + if tag: + return repo, tag + + raise KeyError(f"Repo {repo} not found in release '{self}' images") + + def _get_image_core(self, repo: str): + """Get the core image for 'repo' without the tag or None if the + image could not be found. + """ + + images = list(filter(lambda x: x.startswith(repo), self.core_images)) + if images: + return images[0].split(":")[1] + + return None + + def is_supported(self, env) -> bool: + """Returns True if a given environment's services are supported by + this release. 
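To make the two image field shapes concrete, here is an illustrative-only sketch: `images` is a dictionary of image name to tag, while the per-service `*_images` fields are lists of `"name:tag"` strings, and `get_service_image()` above walks such a list splitting each entry on `":"`. The image names and tags below are made up.

```python
images = {"core-api": "3.1.0", "core-ui": "3.1.0"}
airflow_images = ["datacoves/airflow:2.7.3-3.1.0", "datacoves/airflow-logs:3.1.0"]


def find_service_tag(service_images, repo, tag_prefix=None):
    # Same matching idea as Release.get_service_image(): match by repo
    # name and, optionally, by a tag prefix.
    for entry in service_images:
        name, tag = entry.split(":")
        if name == repo and (not tag_prefix or tag.startswith(tag_prefix)):
            return name, tag
    raise KeyError(repo)


print(find_service_tag(airflow_images, "datacoves/airflow", tag_prefix="2.7"))
# ('datacoves/airflow', '2.7.3-3.1.0')
```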
+ """ + + return ( + ( + not env.is_service_enabled(settings.SERVICE_AIRBYTE) + or self.airbyte_chart.get("version") in AirbyteAdapter.chart_versions + ) + and ( + not env.is_service_enabled(settings.SERVICE_AIRFLOW) + or self.airflow_chart.get("version") in AirflowAdapter.chart_versions + ) + and ( + not env.is_service_enabled(settings.SERVICE_SUPERSET) + or self.superset_chart.get("version") in SupersetAdapter.chart_versions + ) + ) diff --git a/src/core/api/app/projects/models/repository.py b/src/core/api/app/projects/models/repository.py new file mode 100644 index 00000000..2fa1eb34 --- /dev/null +++ b/src/core/api/app/projects/models/repository.py @@ -0,0 +1,425 @@ +from core.fields import EncryptedTextField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.db import models +from users.models import User + +from ..cryptography import ( + DSA_KEY_TYPE, + ECDSA_KEY_TYPE, + ECDSA_SK_KEY_TYPE, + ED25519_KEY_TYPE, + ED25519_SK_KEY_TYPE, + RSA_KEY_TYPE, + generate_azure_keypair, + generate_ssh_key_pair, + generate_ssh_public_key, + generate_ssl_key_pair, + generate_ssl_public_key, +) + + +class Repository(AuditModelMixin, DatacovesModel): + """Definition for a source code repository used by a project + + These are all GIT repositories, but they can come from several different + providers. + + ========= + Constants + ========= + + - PROVIDER_GITHUB + - PROVIDER_GITLAB + - PROVIDER_BITBUCKET + - PROVIDERS - Tuple of tuple-pairs for populating a select box + + ======= + Methods + ======= + + - **save(...)** - Overidden to enforce git_url to be lower case and + to set provider if left unset when creating a new Repository. + """ + + PROVIDER_GITHUB = "github" + PROVIDER_GITLAB = "gitlab" + PROVIDER_BITBUCKET = "bitbucket" + PROVIDERS = ( + ( + PROVIDER_GITHUB, + "Github", + ), + ( + PROVIDER_GITLAB, + "Gitlab", + ), + ( + PROVIDER_BITBUCKET, + "BitBucket", + ), + ) + + git_url = models.CharField( + max_length=250, + unique=True, + help_text="This may be a URL, or a ssh path such as: " + "git@github.com:GROUP/REPO.git ... it will be forced to lower case " + "on save.", + ) + url = models.URLField( + max_length=250, + blank=True, + null=True, + help_text="This only supports a URL and is optional.", + ) + provider = models.CharField(max_length=60, choices=PROVIDERS, null=True, blank=True) + + class Meta: + verbose_name_plural = "repositories" + + def __str__(self): + return self.git_url + + def save(self, *args, **kwargs): + """Enforces git_url to be lower case, and guesses which provider + based on URL on creating a new repo. + """ + + self.git_url = self.git_url.lower() + if not self.pk and not self.provider: + if "github.com" in self.git_url: + self.provider = self.PROVIDER_GITHUB + elif "gitlab.com" in self.git_url: + self.provider = self.PROVIDER_GITLAB + elif "bitbucket.org" in self.git_url: + self.provider = self.PROVIDER_BITBUCKET + super().save(*args, **kwargs) + + +class UserRepository(AuditModelMixin, DatacovesModel): + """Extends :model:`projects.Repository` to provide user level credentials + + This is for user-defined repositories which may be used on code server + for example. It uses Repository as a base but adds in ssh key information. + Does not currently support HTTP. 
+ """ + + user = models.ForeignKey( + "users.User", on_delete=models.CASCADE, related_name="repositories" + ) + repository = models.ForeignKey( + Repository, on_delete=models.CASCADE, related_name="users" + ) + ssh_key = models.ForeignKey( + "SSHKey", on_delete=models.CASCADE, related_name="users" + ) + validated_at = models.DateTimeField(blank=True, null=True) + + class Meta: + verbose_name_plural = "user repositories" + constraints = [ + models.UniqueConstraint( + fields=["user", "repository"], + name="User repository uniqueness", + ) + ] + + def __str__(self): + return f"{self.user}:{self.repository}" + + +class SSHKeyManager(models.Manager): + def new( + self, + created_by: User = None, + associate: bool = False, + private: str = None, + usage: str = None, + key_type: str = None, + ): + if private: + key = generate_ssh_public_key(private) + else: + key = generate_ssh_key_pair(key_type=key_type or ED25519_KEY_TYPE) + + if created_by: + key["created_by"] = created_by + if usage: + key["usage"] = usage + + key["generated"] = not private + instance = self.create(**key) + if associate: + instance.associate_to_user_repos() + + return instance + + +class SSHKey(AuditModelMixin, DatacovesModel): + """Storage for SSH Keys used by environments + + This uses a manager called SSHKeyManager that provides the method + **new(created_by, associate, private, usage)** to generate keypairs + automatically. + + ========= + Constants + ========= + + --------- + Key Types + --------- + + - KEY_TYPE_DSA + - KEY_TYPE_ECDSA + - KEY_TYPE_ECDSA_SK + - KEY_TYPE_ED25519 + - KEY_TYPE_ED25519_SK + - KEY_TYPE_RSA + - KEY_TYPES - Tuple of tuple pairs for populating select box + + --------- + Key Usage + --------- + + - USAGE_USER + - USAGE_PROJECT + - USAGES - Tuple of tuple pairs for populating select box + + ======= + Methods + ======= + + - **associate_to_user_repos(projects=None)** - + Creates/Updates UserRepositories for each project in 'projects' + to use this SSH Key. This will not work on USAGE_PROJECT keys. + - **save(...)** - SSH Private keys must use only \\\\n as newlines and + must end with a new line + """ + + KEY_TYPE_DSA = DSA_KEY_TYPE + KEY_TYPE_ECDSA = ECDSA_KEY_TYPE + KEY_TYPE_ECDSA_SK = ECDSA_SK_KEY_TYPE + KEY_TYPE_ED25519 = ED25519_KEY_TYPE + KEY_TYPE_ED25519_SK = ED25519_SK_KEY_TYPE + KEY_TYPE_RSA = RSA_KEY_TYPE + + KEY_TYPES = ( + ( + KEY_TYPE_DSA, + "dsa", + ), + ( + KEY_TYPE_ECDSA, + "ecdsa", + ), + ( + KEY_TYPE_ECDSA_SK, + "ecdsa-sk", + ), + ( + KEY_TYPE_ED25519, + "ed25519", + ), + ( + KEY_TYPE_ED25519_SK, + "ed25519-sk", + ), + ( + KEY_TYPE_RSA, + "rsa", + ), + ) + + USAGE_USER = "user" + USAGE_PROJECT = "project" + USAGES = ( + ( + USAGE_USER, + "User", + ), + ( + USAGE_PROJECT, + "Project", + ), + ) + + key_type = models.CharField( + max_length=20, choices=KEY_TYPES, default=KEY_TYPE_ED25519 + ) + private = EncryptedTextField() + public = models.TextField() + usage = models.CharField(max_length=20, choices=USAGES, default=USAGE_USER) + generated = models.BooleanField( + default=True, help_text="If not generated, it means the user provided it." 
+ ) + created_by = models.ForeignKey( + "users.User", + on_delete=models.SET_NULL, + null=True, + blank=True, + related_name="ssh_keys", + ) + + objects = SSHKeyManager() + + class Meta: + verbose_name = "SSH key" + + def __str__(self): + return f"{self.id}:{self.public_short}" + + @property + def public_short(self): + """Returns 'short code' of the public hash for display purposes""" + return self.public[:100] + + def associate_to_user_repos(self, projects=None): + """Creates/Updates UserRepositories for each project in 'projects' + to use this SSH Key. This will not work on USAGE_PROJECT keys. + """ + + if self.usage == self.USAGE_USER: + # Associating key to each user repository + for project in projects or self.created_by.projects: + UserRepository.objects.update_or_create( + user=self.created_by, + repository=project.repository, + defaults={"ssh_key": self}, + ) + else: + raise ValueError( + "Service SSH keys are not supposed to be assigned to user repositories" + ) + + def save(self, *args, **kwargs): + """SSH Private keys must use only \n as newlines and must end with a new line""" + self.private = self.private.replace("\r\n", "\n") + if self.private[-1:] != "\n": + self.private += "\n" + + self.public = self.public.replace("\r\n", "\n") + return super().save(*args, **kwargs) + + +class SSLKeyManager(models.Manager): + def new( + self, + created_by: User = None, + private: str = None, + usage: str = "user", + format: str = "snowflake", + ): + if format == "azure": + key = generate_azure_keypair() + else: + if private: + key = generate_ssl_public_key(private.strip()) + else: + key = generate_ssl_key_pair(RSA_KEY_TYPE) + + key["generated"] = not private + key["usage"] = usage + + if created_by: + key["created_by"] = created_by + + return self.create(**key) + + +class SSLKey(AuditModelMixin, DatacovesModel): + """Storage for SSL keys used by environments + + This uses a manager called SSLKeyManager that provides the method + **new(created_by, private)** to generate keypairs automatically. 
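A standalone sketch of the key normalization that `SSHKey.save()` above (and `SSLKey.save()` below) applies before storing key material: Windows line endings are converted to `\n` and a trailing newline is guaranteed. The placeholder key string is invented.

```python
def normalize_key(material):
    material = material.replace("\r\n", "\n")
    if not material.endswith("\n"):
        material += "\n"
    return material


print(repr(normalize_key("-----BEGIN KEY-----\r\nabc\r\n-----END KEY-----")))
# '-----BEGIN KEY-----\nabc\n-----END KEY-----\n'
```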
+ + ========= + Constants + ========= + + --------- + Key Types + --------- + + - KEY_TYPE_DSA + - KEY_TYPE_RSA + - KEY_TYPES - tuple of tuple pairs for populating select boxes + + ---------- + Key Usages + ---------- + + - USAGE_USER + - USAGE_PROJECT + - USAGES - Tuple of tuple pairs for populating select box + + ======= + Methods + ======= + + - **save(...)** - SSL Private keys must use only \\\\n as newlines and + must end with a new line + """ + + KEY_TYPE_DSA = "dsa" + KEY_TYPE_RSA = "rsa" + + KEY_TYPES = ( + ( + KEY_TYPE_DSA, + "dsa", + ), + ( + KEY_TYPE_RSA, + "rsa", + ), + ) + + USAGE_USER = "user" + USAGE_PROJECT = "project" + USAGES = ( + ( + USAGE_USER, + "User", + ), + ( + USAGE_PROJECT, + "Project", + ), + ) + + key_type = models.CharField(max_length=20, choices=KEY_TYPES, default=KEY_TYPE_RSA) + private = EncryptedTextField() + public = models.TextField() + usage = models.CharField(max_length=20, choices=USAGES, default=USAGE_USER) + generated = models.BooleanField(default=True) + created_by = models.ForeignKey( + "users.User", + on_delete=models.SET_NULL, + null=True, + blank=True, + related_name="ssl_keys", + ) + + objects = SSLKeyManager() + + class Meta: + verbose_name = "SSL key" + + def __str__(self): + return f"{self.id}:{self.public_short}" + + @property + def public_short(self): + """Returns a short version of the public key for display purposes""" + return self.public[:100] + + def save(self, *args, **kwargs): + """SSL Private keys must use only \n as newlines and must end with a new line""" + self.private = self.private.replace("\r\n", "\n") + if self.private[-1:] != "\n": + self.private += "\n" + + self.public = self.public.replace("\r\n", "\n") + return super().save(*args, **kwargs) diff --git a/src/core/api/app/projects/models/user_environment.py b/src/core/api/app/projects/models/user_environment.py new file mode 100644 index 00000000..0c2a9a76 --- /dev/null +++ b/src/core/api/app/projects/models/user_environment.py @@ -0,0 +1,265 @@ +import random +import string + +from core.fields import EncryptedJSONField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.core.exceptions import ValidationError +from django.db import IntegrityError, models, transaction +from django.utils import timezone +from django.utils.text import slugify + + +class UserEnvironment(AuditModelMixin, DatacovesModel): + """The UserEnvironment Model is for per-user Environments + + This is currently Code Server. A user environment can be private, + public, shared, or require authentication. User environments are + children of a parent 'shared' environment. + + ========= + Constants + ========= + + - ACCESS_PRIVATE + - ACCESS_AUTHENTICATED + - ACCESS_PUBLIC + - CODE_SERVER_ACCESS - Tuple of tuple pairs for populating select box + + ======= + Methods + ======= + + - **clean()** - Private method to handle validation + - **save(...)** - Overridden for both validation and to generate share + codes if needed. This can kick off a workspace sync. + - **_generate_share_code()** - Private method to generate a random share + code. Used by save() + - **_get_standardize_exposures()** - Slufigy service keys and + transform configs to strings + - **restart_code_server()** - Mark the code server as restarted, + then saves. This will trigger a workspace sync. + - **is_service_valid(service_name)** - Is the given service valid + - **enabled_and_valid_services()** Returns a set of valid, enabled + services. 
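A rough, plain-Python sketch of what `_get_standardize_exposures()` listed above produces: service keys are slugified, every option value is stringified, and missing share codes and websocket flags are filled in. The `simple_slugify` and `share_code` helpers below are simplified stand-ins (the model uses Django's `slugify` and its own `_generate_share_code`), and the sample exposure is invented.

```python
import random
import re
import string


def simple_slugify(value):
    return re.sub(r"[^a-z0-9]+", "-", value.lower()).strip("-")


def share_code():
    return "".join(random.choice(string.ascii_lowercase) for _ in range(10))


def standardize(exposures):
    out = {}
    for name, options in exposures.items():
        options = dict(options)
        if not options.get("share_code"):
            options["share_code"] = share_code()
        if not options.get("websockets"):
            options["websockets"] = "false"
        out[simple_slugify(name)] = {k: str(v) for k, v in options.items()}
    return out


print(standardize({"My Django App": {"port": 3000, "access": "private"}}))
```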
+ """ + + ACCESS_PRIVATE = "private" + ACCESS_AUTHENTICATED = "authenticated" + ACCESS_PUBLIC = "public" + CODE_SERVER_ACCESS = ( + ( + ACCESS_PRIVATE, + "private", + ), + ( + ACCESS_AUTHENTICATED, + "authenticated", + ), + ( + ACCESS_PUBLIC, + "public", + ), + ) + + environment = models.ForeignKey( + "Environment", on_delete=models.CASCADE, related_name="user_environments" + ) + user = models.ForeignKey( + "users.User", on_delete=models.CASCADE, related_name="user_environments" + ) + + heartbeat_at = models.DateTimeField(default=timezone.now) + code_server_active = models.BooleanField(default=False) + code_server_local_airflow_active = models.BooleanField(default=False) + + code_server_last_shared_at = models.DateTimeField( + blank=True, + null=True, + help_text="For security reasons, access will be changed back to private after" + " 2 hours elapsed from this datetime", + ) + code_server_share_code = models.CharField( + max_length=10, + null=True, + blank=True, + unique=True, + help_text="This is automatically generated to be a random value " + "on save() if code_server_access isn't ACCESS_PRIVATE", + ) + code_server_access = models.CharField( + max_length=50, + choices=CODE_SERVER_ACCESS, + default=ACCESS_PRIVATE, + help_text="Who can access code-server? Change with caution as this configuration" + " may expose sensitive information.", + ) + exposures = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="Dict of http services listening on code server pod, i.e. " + '{"django": {"port": 3000, "access": "private", "websockets": "true"}}', + ) + code_server_restarted_at = models.DateTimeField(default=timezone.now) + variables = EncryptedJSONField( + default=dict, + null=True, + blank=True, + help_text="Dictionary of key-value pairs for environment variables", + ) + services = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="Dict to handle the state of services at user level and unmet preconditions if found.", + ) + local_airflow_config = EncryptedJSONField(default=dict, blank=True, null=True) + code_server_config = EncryptedJSONField( + default=dict, + blank=True, + null=True, + help_text="Extra configuration for user's code-server", + ) + + class Meta: + constraints = [ + models.UniqueConstraint( + fields=["environment", "user"], + name="Environment user uniqueness", + ) + ] + + def __str__(self): + return f"{self.environment}:{self.user}" + + @property + def env(self): + return self.environment + + @property + def share_links(self): + """Returns a dictionary of links for the different services provided + by this user environment (see the 'services' field) to URLs which + can be used to access the environment if shared.""" + + links = {} + code_server_url_sufix = ( + f"{self.environment.slug}.{self.environment.cluster.domain}" + ) + if self.code_server_access != self.ACCESS_PRIVATE: + code_server_url = ( + f"https://{self.code_server_share_code}-{code_server_url_sufix}" + ) + links["code-server"] = code_server_url + for service, options in self.exposures.items(): + links[service] = f"https://{options['share_code']}-{code_server_url_sufix}" + return links + + @property + def is_code_server_enabled(self) -> bool: + return ( + self.code_server_active + and not self.environment.project.account.is_suspended( + self.environment.cluster + ) + and self.is_service_valid(settings.SERVICE_CODE_SERVER) + ) + + def clean(self): + """Validate ports in the 'exposures' dictionaries. 
Raises + ValidationError if there is a problem""" + + for key, value in self.exposures.items(): + port = value["port"] + if port: + if not str(port).isdigit(): + raise ValidationError( + f"'port' field is not a number on service '{key}' dict." + ) + else: + raise ValidationError( + f"'port' field not found on service '{key}' dict." + ) + + def _generate_share_code(self) -> str: + """Generate a random share code""" + + return "".join(random.choice(string.ascii_lowercase) for i in range(10)) + + def _get_standardize_exposures(self): + """slugify service keys and transform configs to strings""" + standardize_exposures = {} + for exposure, options in self.exposures.items(): + exposure_key = slugify(exposure) + standardize_exposures[exposure_key] = {} + # Generate share codes for shared exposures + if not options.get("share_code"): + options["share_code"] = self._generate_share_code() + + if not options.get("websockets"): + options["websockets"] = "false" + + for key, value in options.items(): + standardize_exposures[exposure_key][key] = str(value) + return standardize_exposures + + def save(self, *args, **kwargs): + """Do validation via 'clean' and set up share code if needed. + This can trigger a workspace sync.""" + + self.clean() + self.exposures = self._get_standardize_exposures() + + if self.pk and self.code_server_access != self.ACCESS_PRIVATE: + old_version = UserEnvironment.objects.get(id=self.pk) + if old_version.code_server_access == self.ACCESS_PRIVATE: + # If previous version was private + self.code_server_last_shared_at = timezone.now() + self.code_server_share_code = self._generate_share_code() + + retries = 5 + exception = None + while retries > 0: + retries -= 1 + try: + with transaction.atomic(): + return super().save(*args, **kwargs) + except IntegrityError as e: + if "projects_userenvironment_code_server_share_code_key" in str(e): + exception = e + self.code_server_share_code = self._generate_share_code() + else: + raise e + if exception: + raise exception + + def restart_code_server(self): + """Restarts the code server deployment by changing an annotation""" + self.code_server_restarted_at = timezone.now() + self.save() + + def enabled_local_airflow(self): + """Enable local Airflow""" + if not self.code_server_local_airflow_active: + self.code_server_local_airflow_active = True + self.restart_code_server() + + def is_service_valid(self, service_name) -> bool: + """Checks if a services is valid""" + if self.services: + # If the service is disabled in the environment we do not need to validate it. 
+ if not self.environment.is_service_enabled_and_valid(service_name): + return False + + service = self.services.get(service_name, {}) + valid = service.get("valid", False) + assert isinstance(valid, bool) + return valid + + return False + + def enabled_and_valid_services(self): + """Returns a set of valid services from the 'services' field""" + return {service for service in self.services if self.is_service_valid(service)} diff --git a/src/core/api/app/projects/models/webhook.py b/src/core/api/app/projects/models/webhook.py new file mode 100644 index 00000000..0dc60051 --- /dev/null +++ b/src/core/api/app/projects/models/webhook.py @@ -0,0 +1,207 @@ +import re +import uuid + +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from dateutil import parser +from django.db import models, transaction +from django.template.loader import render_to_string +from notifications.models import AccountNotification + +from lib.tools import get_related_account, get_related_environment + + +class BlockedPodCreationRequest(AuditModelMixin, DatacovesModel): + """Tracks Blocked Creation Pod Requests + + This is used for error tracking and notification purposes. This + object is loaded up with a web request object (using set_request), + and then is_allowed_to_run is checked. If True, we do not save + this object and wet permit the pod creation. If False, we'll save + this object and trigger a notification, then deny the pod creation. + + ========= + Constants + ========= + + - AIRFLOW_POD - Airflow pod name + - AIRBYTE_POD - Airbyte pod name + - PATTERN_DICT - Dictionary mapping the above two constants to patterns + to check if a given pod label matches that pod type. Used by + pod_kind + + ======= + Methods + ======= + + - **set_request(request)** - Sets the object with the web request and + triggers parse_request. This is the "entrypoint" for this process. + - **parse_request()** - Parses self.request into different fields on + this object. Not usually called since set_request does it for you. + - **is_allowed_to_run()** - Returns True if the pod is allowed to run. + - **send_notification_email(...)** - Sends notification email, this is + triggered by save(...) so you usually don't need to do this yourself. + Takes kwargs but ignores them. + - **save(...)** - Overrides 'save' to send notification email after + the transaction is committed. 
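A simplified stand-in for the label matching that `PATTERN_DICT` and `pod_kind` (described above, defined below) perform: the label key selects the candidate pod type and the label value must match that type's pattern. The sample label values are invented.

```python
import re

PATTERN_DICT = {"airbyte": "worker-pod", "airflow-worker": ".+"}


def pod_kind(labels):
    for key, pattern in PATTERN_DICT.items():
        if key in labels and re.search(pattern, labels[key]):
            return key
    return None


print(pod_kind({"airflow-worker": "dag-run-42"}))  # "airflow-worker"
print(pod_kind({"airbyte": "worker-pod-7"}))       # "airbyte"
print(pod_kind({"app": "web"}))                    # None
```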
+ """ + + AIRFLOW_POD = "airflow-worker" + AIRBYTE_POD = "airbyte" + + PATTERN_DICT = { + AIRFLOW_POD: ".+", + AIRBYTE_POD: "worker-pod", + } + + id = models.UUIDField( + default=uuid.uuid4, editable=False, unique=True, primary_key=True + ) + request = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="Request received by webhook, as JSON dictionary", + ) + response = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="Response sent, as JSON dictionary", + ) + request_uid = models.UUIDField(null=True, blank=True) + + uid = models.UUIDField(null=True, blank=True) + creation_timestamp = models.DateTimeField(null=True, blank=True) + kind = models.CharField(max_length=200, null=True, blank=True) + name = models.CharField(max_length=200, null=True, blank=True) + namespace = models.CharField(max_length=200, null=True, blank=True) + + def set_request(self, request): + """Set our request field then trigger parse_request to pull the + data out of it into the other fields + """ + + self.request = request + self.parse_request() + + def parse_request(self): + """Normally, you would not call this; it is called by set_request. + Pulls data out of the request and sets corresponding fields on the + model object. + """ + + _request = self._request + self.request_uid = _request.get("uid") + request_object = self._object + object_metadata = self._metadata + self.uid = object_metadata.get("uid") + self.kind = request_object.get("kind") + self.name = object_metadata.get("name") + self.namespace = object_metadata.get("namespace") + creation_timestamp = object_metadata.get("creationTimestamp") + self.creation_timestamp = ( + parser.isoparse(creation_timestamp) if creation_timestamp else None + ) + + @property + def _request(self): + """Override request to make sure it is always a dictionary""" + + return self.request.get("request", {}) or {} + + @property + def _object(self): + """Override request's object field to make sure it is always a + dictionary""" + return self._request.get("object", {}) or {} + + @property + def _metadata(self): + """Pulls metadata out of request's object field and ensures it is + a dictionary""" + return self._object.get("metadata", {}) or {} + + @property + def _labels(self): + """Return labels from request's object field""" + + return self._metadata.get("labels", {}) or {} + + def is_allowed_to_run(self) -> bool: + """Validates if this pod is allowed to run, returning True if it is + permitted or False otherwise. It uses pod_kind to determine the + type of pod, and then checks execution limits vs. plan limits. 
+ """ + + environment = get_related_environment(self.namespace) + cluster = environment.cluster + if not cluster.is_feature_enabled("block_workers"): + return True + if self.namespace and self.pod_kind in [self.AIRBYTE_POD, self.AIRFLOW_POD]: + account = get_related_account(environment) + if account and account.on_starter_plan: + workers_execution_limit = account.workers_execution_limit_per_period + limit = None + total = 0 + if self.pod_kind == self.AIRFLOW_POD: + limit = workers_execution_limit.get("airflow") + total = account.airflow_workers_seconds_sum + elif self.pod_kind == self.AIRBYTE_POD: + limit = workers_execution_limit.get("airbyte") + total = account.airbyte_workers_seconds_sum + if limit and total > limit: + return False + return True + + @property + def pod_kind(self): + """Introspects the pod kind based on the labels""" + + labels = self._labels + if self.AIRBYTE_POD in labels: + value = labels.get(self.AIRBYTE_POD) + if re.search(self.PATTERN_DICT.get(self.AIRBYTE_POD), value): + return self.AIRBYTE_POD + elif self.AIRFLOW_POD in labels: + value = labels.get(self.AIRFLOW_POD) + if re.search(self.PATTERN_DICT.get(self.AIRFLOW_POD), value): + return self.AIRFLOW_POD + else: + return None + + def send_notification_email(self, **kwargs): + """Send a notification email for when pod creation is blocked""" + + environment = get_related_environment(self.namespace) + account = get_related_account(environment) + + ctx = { + "account_name": account.name, + "name": account.owned_by.name, + } + + content_template = "notifications/content/blocked_pod_creation_message.html" + body = render_to_string(content_template, ctx).strip() + subject_template = "notifications/content/blocked_pod_creation_subject.txt" + title = render_to_string(subject_template, ctx).strip() + account_notification = AccountNotification( + environment=environment, + account=account, + title=title, + body=body, + kind=AccountNotification.KIND_CLUSTER, + ) + account_notification.deactivate_channel("slack") + account_notification.save(send_on_save=True) + + def save(self, *args, **kwargs): + """Queues the send of a notification email on save of object""" + + from projects.tasks import send_notification_email + + transaction.on_commit(lambda: send_notification_email.delay(str(self.id))) + super().save() + + def __str__(self): + return f"{self.id}" diff --git a/src/core/api/app/projects/permissions.py b/src/core/api/app/projects/permissions.py new file mode 100644 index 00000000..a75a8045 --- /dev/null +++ b/src/core/api/app/projects/permissions.py @@ -0,0 +1,42 @@ +from clusters.request_utils import get_cluster +from rest_framework import permissions + + +class IsProjectsAdminEnabled(permissions.BasePermission): + message = "Projects admin feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + return features["admin_projects"] or features["admin_environments"] + + +class IsEnvironmentsAdminEnabled(permissions.BasePermission): + message = "Environments admin feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + return features["admin_environments"] + + +class IsConnectionsAdminEnabled(permissions.BasePermission): + message = "Connections admin feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + return features["admin_connections"] + + +class IsServiceCredentialsAdminEnabled(permissions.BasePermission): + message = "Service 
credentials admin feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + return features["admin_service_credentials"] + + +class IsProfilesAdminEnabled(permissions.BasePermission): + message = "Profiles admin feature is not enabled" + + def has_permission(self, request, view): + features = get_cluster(request).all_features + return features["admin_profiles"] diff --git a/src/core/api/app/projects/runners/__init__.py b/src/core/api/app/projects/runners/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/projects/runners/run_on_bigquery.py b/src/core/api/app/projects/runners/run_on_bigquery.py new file mode 100644 index 00000000..feebb0eb --- /dev/null +++ b/src/core/api/app/projects/runners/run_on_bigquery.py @@ -0,0 +1,39 @@ +import base64 +import json +import os +import sys +import tempfile + +from google.auth.exceptions import DefaultCredentialsError +from google.cloud import bigquery + +connection_script = None +if len(sys.argv) > 2: + connection_script = sys.argv[2] + conn_script_bytes = connection_script.encode("utf-8") + connection_script_string = base64.b64decode(conn_script_bytes).decode("utf-8") +connection_bytes = sys.argv[1] +conn_data_bytes = connection_bytes.encode("utf-8") +connection_string = base64.b64decode(connection_bytes).decode("utf-8") +conn_data = json.loads(connection_string) +credentials_dict = conn_data["keyfile_json"] + + +credentials_json_str = json.dumps(credentials_dict) + +with tempfile.TemporaryDirectory() as tmp_dir: + try: + credentials_path = f"{tmp_dir}/google_application_credentials.json" + with open(credentials_path, "w+") as f: + f.write(credentials_json_str) + os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = f.name + client = bigquery.Client() + if connection_script: + client.query(connection_script_string) + exit(0) + except DefaultCredentialsError as dce: + sys.stdout.write(str(dce)) + exit(13) + except Exception as exc: + sys.stdout.write(str(exc)) + exit(1) diff --git a/src/core/api/app/projects/runners/run_on_databricks.py b/src/core/api/app/projects/runners/run_on_databricks.py new file mode 100644 index 00000000..132c8bc0 --- /dev/null +++ b/src/core/api/app/projects/runners/run_on_databricks.py @@ -0,0 +1,38 @@ +import base64 +import json +import sys + +from databricks import sql as databricks_sql +from databricks.sql import Error as DatabricksError + +connection_script = None +if len(sys.argv) > 2: + connection_script = sys.argv[2] + conn_script_bytes = connection_script.encode("utf-8") + connection_script_string = base64.b64decode(conn_script_bytes).decode("utf-8") +connection_bytes = sys.argv[1] +conn_data_bytes = connection_bytes.encode("utf-8") +connection_string = base64.b64decode(connection_bytes).decode("utf-8") +connection = json.loads(connection_string) + + +try: + host = connection["host"] + http_path = connection["http_path"] + token = connection["token"] + + connection = databricks_sql.connect( + server_hostname=host, http_path=http_path, access_token=token + ) + + if connection: + if connection_script: + connection.cursor().execute(connection_script_string) + connection.close() + exit(0) +except DatabricksError as err: + sys.stdout.write(str(err)) + exit(13) +except Exception as exc: + sys.stdout.write(str(exc)) + exit(1) diff --git a/src/core/api/app/projects/runners/run_on_redshift.py b/src/core/api/app/projects/runners/run_on_redshift.py new file mode 100644 index 00000000..520777df --- /dev/null +++ 
b/src/core/api/app/projects/runners/run_on_redshift.py @@ -0,0 +1,45 @@ +import base64 +import json +import sys + +import redshift_connector +from redshift_connector import Error as RedshiftError + +connection_script = None +if len(sys.argv) > 2: + connection_script = sys.argv[2] + conn_script_bytes = connection_script.encode("utf-8") + connection_script_string = base64.b64decode(conn_script_bytes).decode("utf-8") +connection_bytes = sys.argv[1] +conn_data_bytes = connection_bytes.encode("utf-8") +connection_string = base64.b64decode(connection_bytes).decode("utf-8") +connection = json.loads(connection_string) + + +try: + user = connection["user"] + password = connection["password"] + host_parts = connection["host"].split(":") + host = host_parts[0] + port = None + if len(host_parts) > 1: + port = int(host_parts[1]) + database = connection["database"] + + connection = redshift_connector.connect( + user=user, password=password, host=host, port=port, database=database + ) + + if connection: + if connection_script: + connection.cursor().execute(connection_script_string) + connection.close() + exit(0) +except RedshiftError as err: + if isinstance(err.args[0], dict): + err_dict = err.args[0] + sys.stdout.write(str(err_dict.get("M", err_dict.get("R", "Unspecific error")))) + exit(13) + else: + sys.stdout.write(str(err)) + exit(1) diff --git a/src/core/api/app/projects/runners/run_on_snowflake.py b/src/core/api/app/projects/runners/run_on_snowflake.py new file mode 100644 index 00000000..a7d4176e --- /dev/null +++ b/src/core/api/app/projects/runners/run_on_snowflake.py @@ -0,0 +1,63 @@ +import base64 +import json +import sys + +import requests +import snowflake.connector +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives import serialization +from requests.exceptions import SSLError +from snowflake.connector.errors import Error as SnowflakeError +from snowflake.connector.errors import ForbiddenError as SnowflakeForbiddenError +from snowflake.connector.errors import InterfaceError, ProgrammingError + +connection_script = None +if len(sys.argv) > 2: + connection_script = sys.argv[2] + conn_script_bytes = connection_script.encode("utf-8") + connection_script_string = base64.b64decode(conn_script_bytes).decode("utf-8") +connection_bytes = sys.argv[1] +conn_data_bytes = connection_bytes.encode("utf-8") +connection_string = base64.b64decode(connection_bytes).decode("utf-8") +connection = json.loads(connection_string) + + +def pem_private_key_to_der(private: str): + """Receives a private key PEM encoded and returns it DER encoded""" + pemkey = serialization.load_pem_private_key( + str.encode(private), None, default_backend() + ) + + # Serialize it to DER format + return pemkey.private_bytes( + encoding=serialization.Encoding.DER, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption(), + ) + + +try: + account = connection["account"] + sf_url = f"https://{account}.snowflakecomputing.com" + r = requests.get(sf_url) + if r.status_code == 403: + raise SnowflakeError(f"Snowflake Account {account} does not exist") + connection["login_timeout"] = 20 + if "private_key" in connection: + connection["private_key"] = pem_private_key_to_der(connection["private_key"]) + connection = snowflake.connector.connect(**connection) + if connection: + if connection_script: + connection.cursor().execute(connection_script_string) + connection.close() + exit(0) +except (SSLError, InterfaceError): + # SSL Error or InterfaceError are caused by a 
wrong subdomain thus a wrong account + sys.stdout.write(f"Snowflake Account {account} does not exist") + exit(13) +except (SnowflakeForbiddenError, SnowflakeError, ProgrammingError) as err: + sys.stdout.write(err.raw_msg.replace("\n", " ")) + exit(13) +except Exception as exc: + sys.stdout.write(str(exc)) + exit(1) diff --git a/src/core/api/app/projects/runners/utils.py b/src/core/api/app/projects/runners/utils.py new file mode 100644 index 00000000..0ba46931 --- /dev/null +++ b/src/core/api/app/projects/runners/utils.py @@ -0,0 +1,19 @@ +import base64 +import json + +SQL_RUNNERS_VIRTUALENVS = { + "databricks": "DATABRICKS_VIRTUALENV", + "snowflake": "SNOWFLAKE_VIRTUALENV", + "redshift": "REDSHIFT_VIRTUALENV", + "bigquery": "BIGQUERY_VIRTUALENV", +} + + +def get_connection_b64(connection: dict): + conn_data_str = json.dumps(connection).encode("utf-8") + return base64.b64encode(conn_data_str).decode("utf-8") + + +def get_script_b64(script: str): + script_str = script.encode("utf-8") + return base64.b64encode(script_str).decode("utf-8") diff --git a/src/core/api/app/projects/serializers.py b/src/core/api/app/projects/serializers.py new file mode 100644 index 00000000..5045f876 --- /dev/null +++ b/src/core/api/app/projects/serializers.py @@ -0,0 +1,1058 @@ +import json +import logging +import re + +from billing.models import Plan +from clusters.adapters.airflow import AirflowAdapter +from clusters.adapters.code_server import CodeServerAdapter +from clusters.adapters.dbt_docs import DbtDocsAdapter +from clusters.models import Cluster +from clusters.request_utils import get_cluster +from clusters.workspace import get_workloads_status, sync +from codegen.templating import build_user_context +from django.conf import settings +from django.contrib.auth.models import Group +from django.db import transaction +from django.utils import timezone +from iam.models import DatacovesToken +from rest_framework import serializers +from rest_framework_recursive.fields import RecursiveField +from users.models import Account, ExtendedGroup +from users.serializers import AccountSerializer + +from lib.airflow import push_secrets_to_airflow +from lib.dicts import deep_merge + +from .models import ( + ConnectionTemplate, + ConnectionType, + Environment, + EnvironmentIntegration, + Profile, + ProfileFile, + Project, + Release, + Repository, + ServiceCredential, + SSHKey, + UserCredential, + UserEnvironment, + UserRepository, +) + +logger = logging.getLogger(__name__) + + +def update_json_field(instance, validated_data: dict, attr: str): + """Gets original values from instance, applies changes from validated_data, and updates validated_data dict""" + overrides = getattr(instance, attr).copy() + validated_data[attr] = deep_merge(validated_data.get(attr, {}), overrides) + + +class RepositoryReadOnlySerializer(serializers.ModelSerializer): + """This repo disables validations and saving instance, it's used just to return data""" + + class Meta: + model = Repository + fields = ("git_url", "url", "provider") + + extra_kwargs = {"git_url": {"validators": []}, "url": {"validators": []}} + + def save(self, **kwargs): + return self.instance + + +class EnvironmentKeysSerializer(serializers.ModelSerializer): + class Meta: + model = Environment + fields = ("id", "slug") + + def __init__(self, *argc, **kwargs): + super().__init__(*argc, **kwargs) + + # Keep track of the new token created. 
+ self.new_token = None + + def to_representation(self, env): + data = super().to_representation(env) + + data[ + "airflow_api_url" + ] = f"https://api-airflow-{env.slug}.{env.cluster.domain}/api/v1/" + + tokens = [] + + for token in DatacovesToken.objects.filter(environment=env, is_system=False): + tokens.append(token.token_key) + + data["tokens"] = tokens + + if self.new_token is not None: + data["new_token"] = self.new_token + + return data + + def create(self, validated_data): + # Get our environment + env = self.context["view"].get_object() + + if not env: + raise serializers.ValidationError("Environment not found") + + # Figure out our service user, or create it if we don't have one yet. + sa_user = AirflowAdapter.setup_service_account(env) + + # Create a Knox token for the service user. + instance, token = DatacovesToken.objects.create( + user=sa_user, + expiry=None, + prefix="", + type=DatacovesToken.TYPE_ENVIRONMENT, + environment=env, + ) + + self.new_token = token + + return env + + +class EnvironmentIntegrationSerializer(serializers.ModelSerializer): + id = serializers.IntegerField(required=False) + is_notification = serializers.SerializerMethodField() + type = serializers.SerializerMethodField() + + def get_is_notification(self, obj): + return obj.integration.is_notification + + def get_type(self, obj): + return obj.integration.type + + class Meta: + model = EnvironmentIntegration + fields = ("id", "integration", "service", "is_notification", "type") + + +class EnvironmentSerializer(serializers.ModelSerializer): + integrations = EnvironmentIntegrationSerializer(many=True) + + class Meta: + model = Environment + fields = ( + "id", + "name", + "services", + "type", + "slug", + "project", + "created_at", + "dbt_home_path", + "dbt_profiles_dir", + "airflow_config", + "dbt_docs_config", + "code_server_config", + "integrations", + "variables", + "release_profile", + "settings", + ) + + def validate(self, attrs): + # new environment + if not self.instance: + account = Account.objects.get(slug=self.context["account"]) + if ( + account.on_starter_plan + and Environment.objects.filter(project__account=account).count() >= 1 + ): + raise serializers.ValidationError( + "Starter plans do not allow more than 1 environment per account." 
+ ) + return attrs + + def _save_integrations(self, environment, integrations): + dont_delete = [ + integration.get("id") + for integration in integrations + if integration.get("id") + ] + EnvironmentIntegration.objects.filter(environment=environment).exclude( + id__in=dont_delete + ).delete() + for integration in integrations: + int_id = integration.pop("id", None) + integration["environment"] = environment + EnvironmentIntegration.objects.update_or_create( + id=int_id, defaults=integration + ) + + def create(self, validated_data): + validated_data["sync"] = True + integrations = validated_data.pop("integrations", []) + instance = super().create(validated_data) + self._save_integrations(instance, integrations) + instance.create_default_smtp_integration() + return instance + + def update(self, instance, validated_data): + # Updating just the fields that were updated, not everything + update_json_field(instance, validated_data, "airflow_config") + update_json_field(instance, validated_data, "code_server_config") + update_json_field(instance, validated_data, "dbt_docs_config") + update_json_field(instance, validated_data, "services") + integrations = validated_data.pop("integrations", []) + instance = super().update(instance, validated_data) + self._save_integrations(instance, integrations) + return instance + + def to_representation(self, instance): + user = self.context["request"].user + data = super().to_representation(instance) + data["services"] = instance.get_user_services(user=user) + data["service_credentials_count"] = instance.service_credentials.count() + data["airflow_config"] = AirflowAdapter.get_writable_config(instance) + data["code_server_config"] = CodeServerAdapter.get_writable_config(instance) + data["dbt_docs_config"] = DbtDocsAdapter.get_writable_config(instance) + return data + + +class UserEnvironmentSerializer(serializers.ModelSerializer): + env_slug = serializers.SerializerMethodField() + env_name = serializers.SerializerMethodField() + project_name = serializers.SerializerMethodField() + + class Meta: + model = UserEnvironment + fields = ( + "id", + "env_slug", + "env_name", + "project_name", + "code_server_access", + "services", + "share_links", + "variables", + "code_server_config", + ) + + def get_env_slug(self, obj: UserEnvironment): + return obj.environment.slug + + def get_env_name(self, obj: UserEnvironment): + return obj.environment.name + + def get_project_name(self, obj: UserEnvironment): + return obj.environment.project.name + + +class UserEnvironmentVariablesSerializer(serializers.ModelSerializer): + class Meta: + model = UserEnvironment + fields = ("id", "variables") + + +class ConnectionTypeSerializer(serializers.ModelSerializer): + class Meta: + model = ConnectionType + fields = ["id", "name", "slug"] + + +class ConnectionTemplateSerializer(serializers.ModelSerializer): + default_username = serializers.SerializerMethodField() + + class Meta: + model = ConnectionTemplate + fields = [ + "id", + "type", + "name", + "connection_details", + "for_users", + "project", + "user_credentials_count", + "service_credentials_count", + "type_slug", + "connection_user", + "connection_user_template", + "default_username", + ] + + def get_default_username(self, obj: ConnectionTemplate): + if ( + obj.for_users + and obj.connection_user == obj.CONNECTION_USER_FROM_EMAIL_USERNAME + ): + return self.context.get("request").user.email_username + elif obj.for_users and obj.connection_user == obj.CONNECTION_USER_FROM_TEMPLATE: + context = 
build_user_context(self.context.get("request").user) + return obj.connection_user_template.render(context) + elif obj.for_users and obj.connection_user == obj.CONNECTION_USER_FROM_EMAIL: + return self.context.get("request").user.email + elif ( + obj.for_users + and obj.connection_user == obj.CONNECTION_USER_FROM_EMAIL_UPPERCASE + ): + return self.context.get("request").user.email.upper() + else: + return None + + +class ProjectConnectionSerializer(ConnectionTemplateSerializer): + type = ConnectionTypeSerializer() + + +class ProjectSettingsSerializer(serializers.Serializer): + dbt_profile = serializers.CharField(required=False) + + +class MinimalEnvironmentSerializer(serializers.ModelSerializer): + class Meta: + model = Environment + fields = ( + "id", + "name", + "type", + "slug", + "project", + ) + + +class MinimalProjectSerializer(serializers.ModelSerializer): + environments = serializers.SerializerMethodField() + + def get_environments(self, obj): + user = self.context["request"].user + return MinimalEnvironmentSerializer( + user.environments.filter(project=obj), + many=True, + read_only=True, + context=self.context, + ).data + + class Meta: + model = Project + fields = ("name", "slug", "environments", "id") + + +class ProjectSerializer(serializers.ModelSerializer): + connection_templates = ConnectionTemplateSerializer(many=True, read_only=True) + repository = RepositoryReadOnlySerializer() + environments = serializers.SerializerMethodField() + secrets_secondary_backend_config = serializers.CharField( + required=False, allow_null=True, allow_blank=True + ) + + class Meta: + model = Project + fields = ( + "name", + "slug", + "settings", + "ci_home_url", + "ci_provider", + "deploy_credentials", + "deploy_key", + "azure_deploy_key", + "repository", + "clone_strategy", + "public_ssh_key", + "public_ssh_key_type", + "public_azure_key", + "release_branch", + "release_branch_protected", + "environments", + "connection_templates", + "id", + "validated_at", + "variables", + "secrets_secondary_backend", + "secrets_secondary_backend_config", + ) + + def validate(self, attrs): + # secrets_secondary_backend_config is conditionally required. + require_secrets_backend_config = False + + # new project + if not self.instance: + account = Account.objects.get(slug=self.context["account"]) + if account.on_starter_plan and account.projects.count() >= 1: + raise serializers.ValidationError( + "Starter plans do not allow more than 1 project per account." + ) + + if attrs.get("secrets_secondary_backend"): + require_secrets_backend_config = True + + else: + backend = attrs.get("secrets_secondary_backend") + + if backend and backend != self.instance.secrets_secondary_backend: + # They changed backend, so we need new config. + require_secrets_backend_config = True + + if require_secrets_backend_config: + backend_config = attrs.get("secrets_secondary_backend_config") + + if not isinstance(backend_config, dict): + raise serializers.ValidationError( + "Secrets backend configuration must be valid JSON." + ) + + return attrs + + def validate_secrets_secondary_backend_config(self, value): + """This prevents 'bespoke' validation of just this field. We will + validate it in 'validate'. + """ + + if value: + try: + return json.loads(value) + except Exception: + raise serializers.ValidationError( + "Secrets backend configuration must be valid JSON." 
+ ) + + else: + return None + + def create(self, validated_data): + repo_data = validated_data.pop("repository") + validated_data["account"] = Account.objects.get(slug=self.context["account"]) + + # Clean up url if we need to - this removes the user creds from the + # URL if provided. Azure likes to prepend this and it causes us + # problems as a result. + if "url" in repo_data: + repo_data["url"] = re.sub("://[^@/]+@", "://", repo_data["url"]) + + with transaction.atomic(): + git_url = repo_data.pop("git_url") + validated_data["repository"], _ = Repository.objects.update_or_create( + git_url=git_url, defaults=repo_data + ) + + project = Project.objects.create(**validated_data) + self._add_user_to_project_groups(self.context.get("request").user, project) + + # A brand new project won't have environments to push secrets to yet, + # so there is no need to try and push secrets. + + return project + + def update(self, instance: Project, validated_data): + repo_data = validated_data.pop("repository") + + # Clean up url if we need to - this removes the user creds from the + # URL if provided. Azure likes to prepend this and it causes us + # problems as a result. + if "url" in repo_data: + repo_data["url"] = re.sub("://[^@/]+@", "://", repo_data["url"]) + + with transaction.atomic(): + git_url = repo_data.get("git_url") + repo, _ = Repository.objects.update_or_create( + git_url__iexact=git_url, defaults=repo_data + ) + validated_data["repository"] = repo + + if validated_data.get("secrets_secondary_backend_config") is None: + # Leave it alone if not set. + del validated_data["secrets_secondary_backend_config"] + + update_json_field(instance, validated_data, "deploy_credentials") + for key, value in validated_data.items(): + setattr(instance, key, value) + + instance.save() + + # Try to push secrets to the environments if we can and need to. + request = self.context["request"] + cluster = get_cluster(request) + + if cluster.is_feature_enabled("admin_secrets"): + for env in instance.environments.all(): + # If airflow is disabled, we don't care about this environment. + if not env.is_service_enabled("airflow"): + logger.info( + "Airflow isn't enabled for %s so we are not " + "going to update secrets.", + env.slug, + ) + continue + + # If airflow API is disabled, we won't be able to push + # secrets, so we shouldn't try. + if not env.airflow_config.get("api_enabled", False): + logger.info( + "Airflow API isn't enabled for %s so we are " + "not going to update secrets.", + env.slug, + ) + continue + + # We can only push secrets if the environment is online. If + # it isn't, then the secrets will be updated next time the + # environment is up. + env_status_cache = get_workloads_status(env) + if env_status_cache is None: + logger.info("Workloads status is not ready.") + continue + + airflow = f"{env.slug}-airflow-webserver" + if airflow not in env_status_cache or ( + not env_status_cache[airflow]["available"] + and env_status_cache[airflow]["ready_replicas"] > 0 + ): + # Airflow webserver is down, we can't push to it. + logger.info( + "Airflow webserver is down for %s so we are " + "not going to update secrets.", + env.slug, + ) + continue + + # We can push to this one. + try: + push_secrets_to_airflow(env) + sync(env, "Secrets Manager Update", True) + except Exception as e: + # Let's log the error but not do anything with it. 
+ # The most likely reason for this to fail is airflow + # isn't ready to receive the request for some reason, + # and this sort of error can sort itself out easily + # enough by either starting/stopping airflow or just + # waiting a little bit. + # + # I want to log it, though, in case this happens a lot + # in which case there could be a bug :) But I think + # this should rarely happen. + logger.error(e) + + return instance + + def to_representation(self, instance): + rep = super().to_representation(instance) + if "git_password" in rep["deploy_credentials"]: + del rep["deploy_credentials"]["git_password"] + + if "secrets_secondary_backend_config" in rep: + del rep["secrets_secondary_backend_config"] + + return rep + + def get_environments(self, obj): + user = self.context["request"].user + return EnvironmentSerializer( + user.environments.filter(project=obj), + many=True, + read_only=True, + context=self.context, + ).data + + def _add_user_to_project_groups(self, user, project): + """Add users to default project groups""" + groups = Group.objects.filter( + extended_group__project=project, + extended_group__role__in=[ + ExtendedGroup.Role.ROLE_PROJECT_DEVELOPER, + ExtendedGroup.Role.ROLE_PROJECT_SYSADMIN, + ], + ) + for group in groups: + user.groups.add(group) + + +class ProjectKeysSerializer(serializers.ModelSerializer): + class Meta: + model = Project + fields = ("id", "slug") + + def __init__(self, *argc, **kwargs): + super().__init__(*argc, **kwargs) + + # Keep track of the new token created. + self.new_token = None + + def to_representation(self, project): + data = super().to_representation(project) + + tokens = [] + + for token in DatacovesToken.objects.filter(project=project, is_system=False): + tokens.append(token.token_key) + + data["dbt_api_url"] = f"dbt.{settings.BASE_DOMAIN}" + data["tokens"] = tokens + + if self.new_token is not None: + data["new_token"] = self.new_token + + return data + + def create(self, validated_data): + # Get our project + project = self.context["view"].get_object() + + if not project: + raise serializers.ValidationError("Project not found") + + # Figure out our service user, or create it if we don't have one yet. + sa_user = project.setup_service_account() + + # Create a Knox token for the service user. 
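+ # objects.create returns (instance, full_token); only the full token is exposed
+ # once, as "new_token" in to_representation, while listed tokens show just token_key.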
+ instance, token = DatacovesToken.objects.create( + user=sa_user, + expiry=None, + prefix="", + type=DatacovesToken.TYPE_PROJECT, + project=project, + ) + + self.new_token = token + + return project + + +class AccountSetupConnectionSerializer(serializers.Serializer): + type = serializers.CharField() + connection_details = serializers.JSONField() + + +class AccountSetupRepositorySerializer(serializers.ModelSerializer): + class Meta: + model = Repository + fields = ("git_url",) + extra_kwargs = {"git_url": {"validators": []}} + + +class AirflowConfigSerializer(serializers.Serializer): + dags_folder = serializers.CharField(required=False) + yaml_dags_folder = serializers.CharField(required=False) + + +class EnvironmentConfigurationSerializer(serializers.Serializer): + dbt_home_path = serializers.CharField(required=False) + dbt_profile = serializers.CharField(required=False) + airflow_config = AirflowConfigSerializer(required=False) + + +class DbConnectionSerializer(serializers.Serializer): + type = serializers.CharField(required=False) + connection = serializers.DictField(required=False) + ssl_key_id = serializers.IntegerField(required=False) + user_credential_id = serializers.IntegerField(required=False) + service_credential_id = serializers.IntegerField(required=False) + + def validate(self, attrs): + type = attrs.get("type") + if not type: + user_credential_id = attrs.get("user_credential_id") + if user_credential_id: + user_credential = UserCredential.objects.get(id=user_credential_id) + conn_type = user_credential.connection_template.type + conn_data = user_credential.combined_connection() + ssl_key_id = user_credential.ssl_key_id + else: + service_credential_id = attrs["service_credential_id"] + service_credential = ServiceCredential.objects.get( + id=service_credential_id + ) + conn_type = service_credential.connection_template.type + conn_data = service_credential.combined_connection() + ssl_key_id = service_credential.ssl_key_id + else: + conn_type = ConnectionType.objects.get(slug=type) + conn_data = attrs.get("connection") + ssl_key_id = attrs.get("ssl_key_id") + + if ssl_key_id: + conn_data["ssl_key_id"] = ssl_key_id + conn_data_set = set(conn_data) + missing_fields = [] + + for fieldset in conn_type.required_fieldsets: + diff = conn_data_set.difference(fieldset) + if diff: + missing_fields.append(list(diff)) + else: + missing_fields.clear() + break + + if missing_fields: + raise serializers.ValidationError( + "Unexpected or missing fields: " + f"{'; '.join([', '.join(fieldset) for fieldset in missing_fields])}" + ) + return attrs + + +class GitConnectionSerializer(serializers.Serializer): + url = serializers.CharField(required=False) + key_id = serializers.IntegerField(required=False) + user_repository_id = serializers.IntegerField(required=False) + project_id = serializers.IntegerField(required=False) + branch = serializers.CharField(required=False) + get_dbt_projects = serializers.BooleanField(required=False) + + +class AccountSetupSerializer(serializers.Serializer): + """If account_slug is None, it means that this is a Trial account without a subscription""" + + account_slug = serializers.CharField(required=False) + account_name = serializers.CharField() + plan = serializers.CharField(required=False) + billing_period = serializers.CharField(required=False) + project_name = serializers.CharField(required=False) + development_key_id = serializers.IntegerField(required=False) + deploy_key_id = serializers.IntegerField(required=False) + release_branch = 
serializers.CharField(required=False) + services = serializers.JSONField(required=False) + connection = AccountSetupConnectionSerializer(required=False) + repository = AccountSetupRepositorySerializer(required=False) + environment_configuration = EnvironmentConfigurationSerializer(required=False) + project_settings = ProjectSettingsSerializer(required=False) + + def validate(self, attrs): + request = self.context["request"] + cluster = get_cluster(request) + if not cluster.is_feature_enabled("accounts_signup"): + raise serializers.ValidationError("Accounts provisioning is not supported") + + if request.user.trial_accounts > 0: + raise serializers.ValidationError( + "User had already created trial accounts in the past" + ) + + account_slug = attrs.get("account_slug") + if not account_slug: + # Account is new and Free trial + active_accounts = Account.objects.active_accounts().count() + max_accounts = cluster.all_limits["max_cluster_active_accounts"] + if active_accounts >= max_accounts: + raise serializers.ValidationError( + "Accounts can't be created at the moment." + ) + + active_trial_accounts = Account.objects.active_trial_accounts().count() + max_trial_accounts = cluster.all_limits["max_cluster_active_trial_accounts"] + if active_trial_accounts >= max_trial_accounts: + raise serializers.ValidationError( + "Free Trial Accounts can't be created at the moment." + ) + return attrs + + def _delete_temp_unused_ssh_keys(self, user): + """ + Delete all temporary unused SSH keys. + """ + user_keys = user.repositories.all().values_list("ssh_key_id", flat=True) + project_keys = Project.objects.all().values_list("deploy_key_id", flat=True) + SSHKey.objects.filter(created_by=user).exclude(id__in=user_keys).exclude( + id__in=project_keys + ).delete() + + def create(self, validated_data): + user = self.context.get("request").user + account_slug = validated_data.get("account_slug") + plan = Plan.objects.get( + slug=f"{validated_data['plan']}-{validated_data['billing_period']}" + ) + + with transaction.atomic(): + if account_slug: + account = Account.objects.get(slug=account_slug, created_by=user) + account.plan = plan + account.save() + else: + account = Account.objects.create( + name=validated_data["account_name"], created_by=user, plan=plan + ) + + if validated_data.get("project_name"): + services_data = validated_data.pop("services") + connection_data = validated_data.pop("connection") + repo_data = validated_data.pop("repository") + dev_key_id = validated_data.pop("development_key_id", None) + deploy_key_id = validated_data.pop("deploy_key_id", None) + environment_configuration = validated_data.pop( + "environment_configuration", {} + ) + project_settings = validated_data.pop("project_settings", {}) + + git_url = repo_data.pop("git_url") + repo, _ = Repository.objects.get_or_create( + git_url=git_url, defaults=repo_data + ) + UserRepository.objects.update_or_create( + user=user, + repository=repo, + defaults={ + "ssh_key_id": dev_key_id, + "validated_at": timezone.now(), + }, + ) + + project = Project.objects.create( + name=validated_data["project_name"], + release_branch=validated_data["release_branch"], + deploy_key_id=deploy_key_id, + account=account, + repository=repo, + validated_at=timezone.now(), + settings=project_settings, + ) + environment = self._create_environment( + project, services_data, connection_data, environment_configuration + ) + self._create_connection_template_and_credentials( + connection_data, project, user, environment + ) + + # Avoid triggering extra workspace syncs + 
environment.sync = True + environment.save() + + self._add_user_to_account_groups(user, account) + self._delete_temp_unused_ssh_keys(user) + + return account + + def _create_environment( + self, project, services_data, connection_data, environment_configuration + ): + """Create environment with corresponding services info""" + services_data["code-server"] = {"enabled": True} + cluster = Cluster.objects.current().first() + release = Release.objects.get_latest() + return Environment.objects.create( + name="Development", + project=project, + services=services_data, + cluster=cluster, + release=release, + release_profile=f"dbt-{connection_data['type']}", + sync=False, + airflow_config=environment_configuration.get("airflow_config", {}), + dbt_home_path=environment_configuration.get("dbt_home_path", ""), + ) + + def _create_connection_template_and_credentials( + self, connection_data, project, user, environment + ): + """Creates connection and default user credentials""" + connection_data["project"] = project + connection_data["type"] = ConnectionType.objects.get( + slug=connection_data["type"] + ) + connection_data["name"] = "Main" + + if connection_data["type"].is_snowflake or connection_data["type"].is_redshift: + overrides = { + "user": connection_data["connection_details"].pop("user"), + "password": connection_data["connection_details"].pop("password"), + "schema": connection_data["connection_details"].pop("schema"), + } + if connection_data["type"].is_snowflake: + overrides["mfa_protected"] = connection_data["connection_details"].pop( + "mfa_protected" + ) + elif connection_data["type"].is_databricks: + overrides = { + "token": connection_data["connection_details"].pop("token"), + "schema": connection_data["connection_details"].pop("schema"), + } + elif connection_data["type"].is_bigquery: + overrides = { + "keyfile_json": connection_data["connection_details"].pop( + "keyfile_json" + ), + } + + connection_template = ConnectionTemplate.objects.create(**connection_data) + UserCredential.objects.create( + user=user, + environment=environment, + connection_template=connection_template, + validated_at=timezone.now(), + connection_overrides=overrides, + ) + + def _add_user_to_account_groups(self, user, account): + """Add users to default account groups""" + groups = Group.objects.filter( + extended_group__account=account, + extended_group__role__in=[ + ExtendedGroup.Role.ROLE_ACCOUNT_ADMIN, + ExtendedGroup.Role.ROLE_PROJECT_DEVELOPER, + ExtendedGroup.Role.ROLE_PROJECT_SYSADMIN, + ExtendedGroup.Role.ROLE_DEFAULT, + ], + ) + for group in groups: + user.groups.add(group) + + def to_representation(self, instance): + return AccountSerializer(instance, context=self.context).data + + +class ServiceSecretSerializer(serializers.ModelSerializer): + class Meta: + model = ServiceCredential + fields = ["id", "service", "name"] + + def to_representation(self, instance): + representation = super().to_representation(instance) + representation["connection"] = instance.combined_connection() + + return representation + + +class ServiceCredentialSerializer(serializers.ModelSerializer): + class Meta: + model = ServiceCredential + fields = [ + "id", + "service", + "environment", + "name", + "connection_template", + "connection_overrides", + "ssl_key", + "public_ssl_key", + "validated_at", + "delivery_mode", + ] + + def update(self, instance, validated_data): + if validated_data.get("ssl_key"): + if "password" in validated_data["connection_overrides"]: + del validated_data["connection_overrides"]["password"] + else: 
+ # Setting password only if it has a value and was already set in the db + password = instance.connection_overrides.get("password") + if password is not None and not validated_data["connection_overrides"].get( + "password" + ): + validated_data["connection_overrides"]["password"] = password + + token = instance.connection_overrides.get("token") + if token is not None and not validated_data["connection_overrides"].get( + "token" + ): + validated_data["connection_overrides"]["token"] = token + + ret = super().update(instance, validated_data) + + if validated_data["delivery_mode"] == "connection": + try: + push_secrets_to_airflow(instance.environment) + except Exception as e: + # Let's log the error but not do anything with it. + # The most likely reason for this to fail is airflow + # isn't ready to receive the request for some reason, + # and this sort of error can sort itself out easily + # enough by either starting/stopping airflow or just + # waiting a little bit. + # + # I want to log it, though, in case this happens a lot + # in which case there could be a bug :) But I think + # this should rarely happen. + logger.error(e) + + return ret + + def create(self, validated_data): + ret = super().create(validated_data) + + if validated_data["delivery_mode"] == "connection": + try: + push_secrets_to_airflow(ret.environment) + except Exception as e: + # Let's log the error but not do anything with it. + # The most likely reason for this to fail is airflow + # isn't ready to receive the request for some reason, + # and this sort of error can sort itself out easily + # enough by either starting/stopping airflow or just + # waiting a little bit. + # + # I want to log it, though, in case this happens a lot + # in which case there could be a bug :) But I think + # this should rarely happen. 
+ logger.error(e) + + return ret + + def to_representation(self, instance): + rep = super().to_representation(instance) + if "password" in rep["connection_overrides"]: + del rep["connection_overrides"]["password"] + + if "token" in rep["connection_overrides"]: + del rep["connection_overrides"]["token"] + + return rep + + +class ProfileFileSerializer(serializers.ModelSerializer): + id = serializers.IntegerField(required=False) + + class Meta: + model = ProfileFile + fields = ( + "id", + "template", + "mount_path", + "override_existent", + "execute", + ) + + +class ProfileSerializer(serializers.ModelSerializer): + files = ProfileFileSerializer(many=True) + files_from = RecursiveField() + + class Meta: + model = Profile + fields = [ + "id", + "name", + "slug", + "account", + "dbt_sync", + "dbt_local_docs", + "mount_ssl_keys", + "mount_ssh_keys", + "mount_api_token", + "clone_repository", + "files_from", + "files", + "is_system_profile", + ] + + def _save_profile_files(self, profile, files): + dont_delete = [file.get("id") for file in files if file.get("id")] + ProfileFile.objects.filter(profile=profile).exclude(id__in=dont_delete).delete() + for file in files: + int_id = file.pop("id", None) + file["profile"] = profile + ProfileFile.objects.update_or_create(id=int_id, defaults=file) + + def create(self, validated_data): + validated_data["account"] = Account.objects.get(slug=self.context["account"]) + profile_files = validated_data.pop("files", []) + instance = super().create(validated_data) + self._save_profile_files(instance, profile_files) + return instance + + def update(self, instance, validated_data): + validated_data["account"] = Account.objects.get(slug=self.context["account"]) + profile_files = validated_data.pop("files", []) + instance = super().update(instance, validated_data) + self._save_profile_files(instance, profile_files) + return instance + + def to_representation(self, instance): + data = super().to_representation(instance) + data["profile_files_count"] = instance.files.count() + return data diff --git a/src/core/api/app/projects/signals.py b/src/core/api/app/projects/signals.py new file mode 100644 index 00000000..ea3103a0 --- /dev/null +++ b/src/core/api/app/projects/signals.py @@ -0,0 +1,128 @@ +from django.contrib.auth.models import Permission +from django.core.cache import cache +from django.db.models.signals import ( + m2m_changed, + post_delete, + post_save, + pre_delete, + pre_save, +) +from django.dispatch import receiver +from users.models import Account, User + +from lib.utils import m2m_changed_subjects_and_objects + +from .models import Environment, Project, SSHKey, SSLKey + + +@receiver(post_save, sender=Project, dispatch_uid="projects.handle_project_post_save") +def handle_project_post_save(sender, **kwargs): + if kwargs["created"]: + project = kwargs["instance"] + project.create_permissions() + project.create_project_groups() + + +@receiver( + post_save, sender=Environment, dispatch_uid="projects.handle_environment_post_save" +) +def handle_environment_post_save(sender, **kwargs): + if kwargs["created"]: + env = kwargs["instance"] + env.create_permissions() + env.create_environment_groups() + + +@receiver( + m2m_changed, + sender=User.groups.through, + dispatch_uid="projects.handle_user_groups_changed", +) +def handle_user_groups_changed(sender, **kwargs): + """We want to create new associations every time a user access a new project""" + users, group_pks = m2m_changed_subjects_and_objects(kwargs) + action = kwargs["action"] + if action == "post_add": + 
permissions_granted_by_groups = Permission.objects.filter( + group__in=group_pks, + name__contains="|workbench:", + ).values_list("name", flat=True) + projects = Project.from_permission_names(permissions_granted_by_groups) + if projects: + for key in SSHKey.objects.filter( + created_by__in=users, usage=SSHKey.USAGE_USER + ): + key.associate_to_user_repos(projects=projects) + + +@receiver(pre_delete, sender=SSLKey, dispatch_uid="projects.handle_ssl_key_pre_delete") +def handle_ssl_key_pre_delete(sender, **kwargs): + ssl_key = kwargs["instance"] + ssl_key.user_credentials.update(validated_at=None) + + +@receiver( + post_delete, sender=Account, dispatch_uid="projects.handle_account_post_delete" +) +def handle_account_post_delete(sender, **kwargs): + account = kwargs.get("instance") + if account: + Permission.objects.filter(name__startswith=f"{account.slug}|").delete() + + +@receiver( + post_delete, + sender=Project, + dispatch_uid="projects.handle_project_post_delete", +) +def handle_project_post_delete(sender, **kwargs): + project = kwargs.get("instance") + if project: + Permission.objects.filter( + name__startswith=f"{project.account.slug}:{project.slug}|" + ).delete() + + +@receiver( + post_delete, + sender=Environment, + dispatch_uid="projects.handle_environment_post_delete", +) +def handle_environment_post_delete(sender, **kwargs): + environment = kwargs.get("instance") + if environment: + project = environment.project + account = project.account + Permission.objects.filter( + name__startswith=f"{account.slug}:{project.slug}:{environment.slug}|" + ).delete() + + +@receiver( + pre_save, sender=Environment, dispatch_uid="clusters.handle_environment_pred_save" +) +def handle_environment_pre_save(sender, **kwargs): + env = kwargs["instance"] + env_old = Environment.objects.only("services").filter(pk=env.id).first() + + # Services + for service_name, service_new in env.services.items(): + service_old = env_old.services.get(service_name) if env_old else {} + + if service_new and service_new.get("enabled"): + if service_old and service_old.get("enabled", "") != service_new.get( + "enabled" + ): + service_cache_key = f"{env.slug}-{service_name}-enabled" + cache.set(service_cache_key, service_new, timeout=None) + + # Internal services + for service_name, service_new in env.internal_services.items(): + service_old = env_old.internal_services.get(service_name) if env_old else {} + + if service_new and service_new.get("enabled"): + if service_old and service_old.get("enabled", "") != service_new.get( + "enabled" + ): + service_cache_key = f"{env.slug}-{service_name}-enabled" + cache.set(service_cache_key, service_new, timeout=None) diff --git a/src/core/api/app/projects/tasks.py b/src/core/api/app/projects/tasks.py new file mode 100644 index 00000000..b523be8c --- /dev/null +++ b/src/core/api/app/projects/tasks.py @@ -0,0 +1,717 @@ +import re +import time +from datetime import datetime, timedelta +from enum import Enum +from math import log1p + +from celery import Task +from celery.utils.log import get_task_logger +from clusters.builder import WorkbenchBuilder +from clusters.models import Cluster +from clusters.workspace import user_workloads_status +from dateutil.relativedelta import relativedelta +from django.conf import settings +from django.db.models import Q +from django.db.models.functions import Now +from django.utils import timezone +from kubernetes.client.exceptions import ApiException +from projects.models import ( + BlockedPodCreationRequest, + Environment, + ProfileImageSet, + Project, + 
SSHKey, + UserEnvironment, + UserRepository, +) +from users.models import Account, User + +import lib.kubernetes.client as k8s_client +from datacoves.celery import app +from lib.channel import DjangoChannelNotify +from lib.docker import builder +from lib.requirements import merge_requirement_lines, write_requirements + +logger = get_task_logger(__name__) + +USER_INACTIVITY_WINDOW = timezone.now() - relativedelta(months=2) + + +@app.task +def turn_off_unused_workspaces(): + # Must be greater than heartbeat period (index.tsx). + cluster = Cluster.objects.current().first() + window = cluster.settings.get("code_server_inactivity_threshold", 30) + no_pulse_period = timedelta(minutes=window) + if cluster.features_enabled.get("stop_codeserver_on_inactivity"): + user_envs = UserEnvironment.objects.filter( + heartbeat_at__lt=Now() - no_pulse_period, + code_server_active=True, + ) + for ue in user_envs: + logger.info("Stopping code server for user environment %s", ue) + ue.code_server_active = False + ue.code_server_local_airflow_active = False + ue.save() + + +@app.task +def stop_sharing_codeservers(): + shared_period = timedelta(minutes=120) + for ue in UserEnvironment.objects.filter( + Q(code_server_last_shared_at__lt=Now() - shared_period) + | Q(code_server_last_shared_at__isnull=True), + ).exclude(code_server_access=UserEnvironment.ACCESS_PRIVATE): + ue.code_server_access = UserEnvironment.ACCESS_PRIVATE + ue.save() + + +### PROFILE IMAGES ### + + +@app.task +def build_profile_image_set(image_set_id): + """Builds profile image set images""" + image_set = ProfileImageSet.objects.get(id=image_set_id) + release = image_set.release + cluster = Cluster.objects.current().first() + timestamp = datetime.utcnow().strftime("%Y%m%d%H%M%S") + image_set.images_status = {} + + def _images(image_name): + base_image = ProfileImageSet.BASE_IMAGES[image_name] + base_repo, base_tag = cluster.get_image(base_image, release=release) + base_path, base_name = base_repo.rsplit("/", 1) + from_image = ":".join((base_repo, base_tag)) + version = base_tag.split("-")[0] + image_repo = f"{base_path}/pi{image_set_id}-{base_name}" + image_tag = f"{image_repo}:{version}-{timestamp}" + return from_image, image_tag + + if image_set.build_code_server: + from_image_for_code_server, image_tag_for_code_server = _images("code_server") + image_set.images_status[image_tag_for_code_server] = "building" + build_profile_image_code_server.delay( + image_set_id, image_tag_for_code_server, from_image_for_code_server + ) + if image_set.build_dbt_core_interface: + from_image_for_dbt_core_interface, image_tag_for_dbt_core_interface = _images( + "dbt_core_interface" + ) + image_set.images_status[image_tag_for_dbt_core_interface] = "building" + build_profile_image_dbt_core_interface.delay( + image_set_id, + image_tag_for_dbt_core_interface, + from_image_for_dbt_core_interface, + ) + if image_set.build_airflow: + from_image_for_airflow, image_tag_for_airflow = _images("airflow") + image_set.images_status[image_tag_for_airflow] = "building" + build_profile_image_airflow.delay( + image_set_id, image_tag_for_airflow, from_image_for_airflow + ) + if image_set.build_ci_basic: + from_image_for_ci_basic, image_tag_for_ci_basic = _images("ci_basic") + image_set.images_status[image_tag_for_ci_basic] = "building" + build_profile_image_ci_basic.delay( + image_set_id, image_tag_for_ci_basic, from_image_for_ci_basic + ) + if image_set.build_ci_airflow: + from_image_for_ci_airflow, image_tag_for_ci_airflow = _images("ci_airflow") + 
image_set.images_status[image_tag_for_ci_airflow] = "building" + build_profile_image_ci_airflow.delay( + image_set_id, image_tag_for_ci_airflow, from_image_for_ci_airflow + ) + image_set.save() + + +@app.task +def build_profile_image_code_server(image_set_id, image_tag, from_image): + image_set = ProfileImageSet.objects.get(id=image_set_id) + reqs = merge_requirement_lines( + image_set.python_requirements, image_set.code_server_requirements + ) + + def image_def(ctx, d): + write_requirements(ctx, reqs) + d.FROM(from_image) + reqsdir = "/opt/datacoves/profile/python" + d.RUN(f"mkdir -p {reqsdir}") + d.COPY("requirements.txt", f"{reqsdir}/requirements.txt") + install_cmd = ( + f"pip install -r {reqsdir}/requirements.txt --no-warn-script-location" + ) + adapters_cmd = ( + "/opt/datacoves/set_adapters_app.sh all /config/.local/lib && " + "/opt/datacoves/set_adapters_app.sh bigquery /config/.local/lib --skip-validation && " + "/opt/datacoves/set_adapters_app.sh databricks /config/.local/lib --skip-validation && " + "/opt/datacoves/set_adapters_app.sh spark /config/.local/lib --skip-validation" + ) + d.RUN( + f"sudo -u abc bash -c '{install_cmd}' && {adapters_cmd} && mv /config/.local {reqsdir}/local" + ) + if image_set.code_server_extensions: + cmd = "cd /opt/datacoves/profile/extensions && (rm -f *.vsix || true)" + for extension_url in image_set.code_server_extensions: + cmd += f" && /opt/datacoves/download_extension.sh '{extension_url}'" + d.RUN(cmd) + + build_profile_image(image_set, image_tag, image_def) + + +@app.task +def build_profile_image_dbt_core_interface(image_set_id, image_tag, from_image): + image_set = ProfileImageSet.objects.get(id=image_set_id) + reqs = merge_requirement_lines( + image_set.python_requirements, image_set.code_server_requirements + ) + adapters_cmd = ( + "/usr/src/bin/set_adapters_app.sh all /usr/local/lib && " + "/usr/src/bin/set_adapters_app.sh bigquery /usr/local/lib --skip-validation && " + "/usr/src/bin/set_adapters_app.sh databricks /usr/local/lib --skip-validation && " + "/usr/src/bin/set_adapters_app.sh spark /usr/local/lib --skip-validation" + ) + + def image_def(ctx, d): + write_requirements(ctx, reqs) + d.FROM(from_image) + d.COPY("requirements.txt", "/requirements.txt") + d.RUN( + f"pip install -r /requirements.txt && pip uninstall -y sqlfluff-templater-dbt && {adapters_cmd}" + ) + + build_profile_image(image_set, image_tag, image_def) + + +@app.task +def build_profile_image_airflow(image_set_id, image_tag, from_image): + image_set = ProfileImageSet.objects.get(id=image_set_id) + reqs = merge_requirement_lines( + image_set.python_requirements, image_set.airflow_requirements + ) + adapters_cmd = ( + "/opt/datacoves/set_adapters_app.sh all /opt/datacoves/virtualenvs/main/lib && " + "/opt/datacoves/set_adapters_app.sh postgres /opt/datacoves/virtualenvs/main/lib --skip-validation && " + "/opt/datacoves/set_adapters_app.sh bigquery /opt/datacoves/virtualenvs/main/lib --skip-validation && " + "/opt/datacoves/set_adapters_app.sh databricks /opt/datacoves/virtualenvs/main/lib --skip-validation" + ) + + def image_def(ctx, d): + write_requirements(ctx, reqs) + d.FROM(from_image) + venvdir = "/opt/datacoves/virtualenvs/main" + d.COPY("requirements.txt", "requirements.txt") + d.RUN(f"{venvdir}/bin/pip install -r requirements.txt && {adapters_cmd}") + + build_profile_image(image_set, image_tag, image_def) + + +@app.task +def build_profile_image_ci_basic(image_set_id, image_tag, from_image): + image_set = ProfileImageSet.objects.get(id=image_set_id) + reqs = 
merge_requirement_lines( + image_set.python_requirements, image_set.ci_requirements + ) + adapters_cmd = ( + "./set_adapters_app.sh all /usr/local/lib && " + "./set_adapters_app.sh postgres /usr/local/lib --skip-validation && " + "./set_adapters_app.sh bigquery /usr/local/lib --skip-validation && " + "./set_adapters_app.sh databricks /usr/local/lib --skip-validation" + ) + + def image_def(ctx, d): + write_requirements(ctx, reqs) + d.FROM(from_image) + d.COPY("requirements.txt", "/requirements.txt") + d.RUN(f"pip install -r /requirements.txt && {adapters_cmd}") + + build_profile_image(image_set, image_tag, image_def) + + +@app.task +def build_profile_image_ci_airflow(image_set_id, image_tag, from_image): + image_set = ProfileImageSet.objects.get(id=image_set_id) + reqs = merge_requirement_lines( + image_set.python_requirements, image_set.ci_requirements + ) + adapters_cmd = ( + "/opt/datacoves/set_adapters_app.sh all /opt/datacoves/virtualenvs/main/lib && " + "/opt/datacoves/set_adapters_app.sh postgres /opt/datacoves/virtualenvs/main/lib --skip-validation && " + "/opt/datacoves/set_adapters_app.sh bigquery /opt/datacoves/virtualenvs/main/lib --skip-validation && " + "/opt/datacoves/set_adapters_app.sh databricks /opt/datacoves/virtualenvs/main/lib --skip-validation" + ) + + def image_def(ctx, d): + write_requirements(ctx, reqs) + d.FROM(from_image) + d.COPY("requirements.txt", "/requirements.txt") + d.RUN( + f"/opt/datacoves/virtualenvs/main/bin/pip install -r /requirements.txt && {adapters_cmd}" + ) + + build_profile_image(image_set, image_tag, image_def) + + +def build_profile_image(image_set, image_tag, image_def): + try: + assert ( + Cluster.objects.count() == 1 + ), "Invalid assumption: There isn't a single cluster." + + cluster = Cluster.objects.current().first() + build_id = builder.build_and_push_with_kaniko( + cluster=cluster, + image_set=image_set, + image_tag=image_tag, + image_def=image_def, + ns=builder.BUILD_NS, + ) + image_set.set_image_status(image_tag, f"building {build_id}") + + except Exception as e: + image_set.set_image_status(image_tag, "build_prep_error") + raise e + + +@app.task +def check_profile_image_build( + image_set_id: int, cluster_id: int, image_tag: str, build_id: str, logs: str = "" +): + image_set = ProfileImageSet.objects.filter(id=image_set_id).first() + cluster = Cluster.objects.get(id=cluster_id) + phase, _ = builder.check_kaniko_build(cluster, build_id) + if not image_set: + return + if phase == "Succeeded": + image_set.set_image_status( + image_tag=image_tag, status=ProfileImageSet.IMAGE_STATUS_BUILT, logs=logs + ) + elif phase == "Failed": + logger.error( + "profile image build failed | image_set_id=%s, phase=%s, image_tag=%s, build_id=%s", + image_set_id, + phase, + image_tag, + build_id, + ) + image_set.set_image_status( + image_tag=image_tag, + status=ProfileImageSet.IMAGE_STATUS_BUILT_ERROR, + logs=logs, + ) + + image_set.refresh_from_db() + done = image_set.set_images_if_built() + image_set.clean_images_logs() + return f"Profile image set id={image_set_id} {'done' if done else 'working'}" + + +@app.task +def delete_unused_project_keys(): + """ + Deletes all temp ssh keys that are not used in projects, created more than 1 hour ago + """ + project_keys = Project.objects.all().values_list("deploy_key_id", flat=True) + user_keys = UserRepository.objects.all().values_list("ssh_key_id", flat=True) + SSHKey.objects.filter( + usage=SSHKey.USAGE_PROJECT, created_at__lt=Now() - timedelta(minutes=60) + 
).exclude(id__in=project_keys).exclude(id__in=user_keys).delete() + + +@app.task +def delete_unused_user_keys(): + """ + Deletes all temp ssh keys that are not used by users, created more than 24 hours ago + """ + project_keys = Project.objects.all().values_list("deploy_key_id", flat=True) + user_keys = UserRepository.objects.all().values_list("ssh_key_id", flat=True) + SSHKey.objects.filter( + usage=SSHKey.USAGE_USER, created_at__lt=Now() - timedelta(hours=24) + ).exclude(id__in=project_keys).exclude(id__in=user_keys).delete() + + +@app.task +def send_notification_email(blocked_pod_id): + obj: BlockedPodCreationRequest = BlockedPodCreationRequest.objects.filter( + id=blocked_pod_id + ).first() + obj.send_notification_email() + + +@app.task +def remove_unused_user_volumes(): + """ + This task will delete any code server pvc belonging to developers that did not use + an environment on the last 2 or more months, or when the user has been deactivated + """ + + kubectl = k8s_client.Kubectl() + response = kubectl.CoreV1Api.list_persistent_volume_claim_for_all_namespaces() + pvcs = [ + { + "name": item.metadata.name, + "namespace": item.metadata.namespace, + "volume": item.spec.volume_name, + } + for item in response.items + if item.metadata.namespace[:3] == "dcw" + ] + deactivated_users = User.objects.filter( + deactivated_at__lte=USER_INACTIVITY_WINDOW + ).values_list("slug", flat=True) + user_envs = _code_server_user_envs() + for user_volume in pvcs: + user_matches = re.search( + r"code-server-([a-z\d\-]+)-config-volume", user_volume["name"] + ) + env_slug = user_volume["namespace"][4:] + if user_matches and env_slug: + user_slug = user_matches.group(1) + # If user was deactivated or has no permissions to env + delete = False + if user_slug in deactivated_users: + delete = True + else: + if (user_slug, env_slug) not in user_envs: + # Validate that user env does not exist or not accessed during last month + user_env = UserEnvironment.objects.filter( + user__slug=user_slug, environment__slug=env_slug + ).first() + if not user_env or user_env.heartbeat_at <= USER_INACTIVITY_WINDOW: + delete = True + if delete: + logger.info( + f"Trying to remove pvc name: {user_volume['name']}, namespace: {user_volume['namespace']} " + f"for deactivated user: {user_slug}" + ) + try: + kubectl.CoreV1Api.delete_namespaced_persistent_volume_claim( + user_volume["name"], user_volume["namespace"] + ) + logger.info( + f"PVC deleted name: {user_volume['name']}, namespace: {user_volume['namespace']}" + ) + kubectl.CoreV1Api.delete_persistent_volume(user_volume["volume"]) + logger.info(f"PV deleted name: {user_volume['volume']}") + except ApiException as e: + logger.error(f"Unexpected error: {e}") + + +@app.task +def deactivate_users(): + """ + This task will deactivate any user whose user environments were not used during the last 2 months + Their user environments will be deleted as well + """ + for user in User.objects.filter( + deactivated_at__isnull=True, is_service_account=False + ): + inactive_envs = UserEnvironment.objects.filter( + user=user, heartbeat_at__lt=USER_INACTIVITY_WINDOW + ).count() + all_envs = UserEnvironment.objects.filter(user=user) + if inactive_envs == all_envs.count(): + # All User environments are inactive, then we deactivate the user and remove all user envs + user.deactivated_at = timezone.now() + user.save() + all_envs.delete() + + +def _code_server_user_envs(): + """ + Returns a set of tuples user_slug - env_slug for each environment user has access to code server + """ + proj_slugs = 
Environment.objects.all().values_list("slug", "project__slug") + user_envs = [] + for user in User.objects.all(): + project_slugs, env_slugs = user.project_and_env_slugs( + f"|workbench:{settings.SERVICE_CODE_SERVER}" + ) + for env in proj_slugs: + for proj_slug in project_slugs: + if env[1] == proj_slug: + user_envs.append((user.slug, env[0])) + for env_slug in env_slugs: + user_envs.append((user.slug, env_slug)) + return set(user_envs) + + +@app.task +def remove_unused_environments(): + """ + Get accounts which trials ended more than 2 months ago, or have been cancelled + and delete all environments belonging to it. There's a signal that triggers + k8s namespace deletion automatically. + """ + cluster = Cluster.objects.current().first() + account_query = Account.objects.filter( + Q(trial_ends_at__lte=USER_INACTIVITY_WINDOW) + | ( + Q(subscription_updated_at__lte=USER_INACTIVITY_WINDOW) + & Q(cancelled_subscription__isnull=False) + ) + ) + deactivated_accounts = [ + account + for account in account_query + if account.is_suspended(cluster) + and ( + not account.cancelled_subscription # when cancelled, should be >= 1 months ago + or account.cancelled_subscription_period_end <= USER_INACTIVITY_WINDOW + ) + ] + for environment in Environment.objects.filter( + project__account__in=deactivated_accounts + ): + environment.delete() + # Set deactivation datetime on accounts if not deactivated yet + for account in deactivated_accounts: + if not account.deactivated_at: + account.deactivated_at = timezone.now() + account.save() + + +class EnvironmentStatusError(Exception): + pass + + +class EnvironmentStatusEnum(Enum): + """Enum to define the environment status.""" + + NOT_FOUND = "not_found" + RUNNING = "running" + IN_PROGRESS = "in_progress" + MAX_RETRY = "max_retries" + + +class EnviromentStatusTaskWithRetry(Task): + """Base task to check the status of an environment per user. 
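+
+ With retry_backoff capped at retry_backoff_max=4 (seconds) and retry_jitter disabled,
+ retries settle to roughly one attempt every 4 seconds, so max_retries=150 covers about
+ 10 minutes of polling before on_failure is invoked.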
+ + https://docs.celeryq.dev/en/latest/userguide/tasks.html#Task.autoretry_for + + Args: + Task: Celery task + """ + + autoretry_for = (EnvironmentStatusError,) + max_retries = 150 # 150 retries = 10 minutes (150*4/60) + retry_backoff = True + retry_backoff_max = 4 + retry_jitter = False + acks_late = True + + def _sync_user_env(self, ue_id: int): + if ue_id: + try: + logger.info( + f"Trying to sync user environment id={ue_id} after task failure" + ) + ue = ( + UserEnvironment.objects.only("environment__slug", "user__slug") + .select_related("user", "environment") + .get(pk=ue_id) + ) + + WorkbenchBuilder(user=ue.user, env_slug=ue.environment.slug).heartbeat() + UserEnvironment.objects.get(pk=ue_id).save() + + except UserEnvironment.DoesNotExist: + pass + + def _send_message_to_client( + self, kwargs, check_status: EnvironmentStatusEnum, user_env_status: dict + ): + account_slug = kwargs.get("account_slug") + env_slug = kwargs.get("env_slug") + user_slug = kwargs.get("user_slug") + + payload = { + "env": env_slug, + "check_status": check_status.value, + "details": user_env_status + or {"status": "not_found", "updated_at": timezone.now()}, + } + + group_name = f"workspace_user_account_slug_{account_slug}_user_slug_{user_slug}" + with DjangoChannelNotify( + consumer="env.status.change", + group_name=group_name, + message_type="env.status", + payload=payload, + ): + pass + + def on_success(self, retval, task_id, args, kwargs): + user_env_status = retval.get( + "user_env_status", + { + "status": EnvironmentStatusEnum.RUNNING.value, + "updated_at": timezone.now(), + }, + ) + + self._send_message_to_client( + kwargs=kwargs, + check_status=EnvironmentStatusEnum.RUNNING, + user_env_status=user_env_status, + ) + return super().on_success(retval, task_id, args, kwargs) + + def on_retry(self, exc, task_id, args, kwargs, einfo): + # The task is running synchronously. + if self.request.is_eager: + time.sleep(5) + + user_env_status = getattr(exc, "user_env_status", None) + self._send_message_to_client( + kwargs=kwargs, + check_status=EnvironmentStatusEnum.IN_PROGRESS, + user_env_status=user_env_status, + ) + + # Re-sync user environment for triggering save signal + if self.request.retries in (18, 30, 60): + ue_id = getattr(exc, "ue_id", None) + self._sync_user_env(ue_id=ue_id) + + return super().on_retry(exc, task_id, args, kwargs, einfo) + + def on_failure(self, exc, task_id, args, kwargs, einfo): + user_env_status = {"status": "error", "updated_at": timezone.now()} + self._send_message_to_client( + kwargs=kwargs, + check_status=EnvironmentStatusEnum.MAX_RETRY, + user_env_status=user_env_status, + ) + + # Update user environment for triggering save signal. 
+ ue_id = getattr(exc, "ue_id", None) + self._sync_user_env(ue_id=ue_id) + return super().on_failure(exc, task_id, args, kwargs, einfo) + + +@app.task(base=EnviromentStatusTaskWithRetry, bind=True) +def sync_user_workloads_status( + self, account_slug: str, env_slug: str, user_slug: int, **kwargs +): + try: + ue = ( + UserEnvironment.objects.only( + "code_server_restarted_at", + "services", + "user__slug", + "environment__created_at", + "environment__services", + "environment__slug", + "environment__type", + "environment__project", + "environment__project__account", + "environment__project__account__developer_licenses", + ) + .select_related( + "user", + "environment", + "environment__project", + "environment__project__account", + ) + .get(user__slug=user_slug, environment__slug=env_slug) + ) + + date = timezone.now() + user_env_status = user_workloads_status(ue=ue) + + # Delay to wait for environment components when the environment is created + services = user_env_status.get("services", {}) + total_services = 0 + total_services_running = 0 + env_created = True + + for service, state in services.items(): + total_services += 1 + if state != EnvironmentStatusEnum.RUNNING.value: + env_created = False + else: + total_services_running += 1 + + wait_time = 600 # seconds + seconds_elapsed = (date - ue.environment.created_at).total_seconds() + if not env_created and seconds_elapsed < wait_time: + if total_services == 0: + progress = int(min(log1p(seconds_elapsed) * 3, 80)) + else: + service_progress = total_services_running / total_services + if service_progress < 1.0: + time_boost = log1p(seconds_elapsed) / 25 + progress = int(min((service_progress + time_boost) * 100, 99)) + else: + progress = 100 + + error = EnvironmentStatusError( + f"Waiting for environment {env_slug} creation {progress}% completed" + ) + error.ue_id = ue.id + error.user_env_status = { + "status": EnvironmentStatusEnum.NOT_FOUND.value, + "updated_at": date, + "progress": progress, + } + raise error + + # Check if the user has restarted his environment + if ue.code_server_restarted_at: + # We need to wait for some Kubernetes components after a restart + seconds_elapsed = (date - ue.code_server_restarted_at).total_seconds() + if seconds_elapsed < 15: + error = EnvironmentStatusError( + f"Waiting for {seconds_elapsed:.1f} seconds after user environment {str(ue)} restart" + ) + error.ue_id = ue.id + error.user_env_status = { + "status": EnvironmentStatusEnum.IN_PROGRESS.value, + "updated_at": date, + } + raise error + + # Task retry + component = kwargs.get("component") + state = user_env_status.get("status", "") + if ( + component == "launchpad" + and state not in ("in_progress", "running") + or component != "launchpad" + and state != "running" + ): + error = EnvironmentStatusError( + f"Checking user environment: {str(ue)} state={state} attempt={self.request.retries}" + ) + error.ue_id = ue.id + error.user_env_status = user_env_status + raise error + + return { + "message": f"Check user environment status task finished successfully for {str(ue)}", + "user_env_status": user_env_status, + } + + except UserEnvironment.DoesNotExist: + # UserEnvironment has not created yet + error = EnvironmentStatusError( + f"UserEnvironment not found for user={user_slug} env={env_slug}" + ) + error.user_env_status = { + "status": EnvironmentStatusEnum.NOT_FOUND.value, + "updated_at": timezone.now(), + "progress": 0, + } + raise error + + +@app.task +def user_notification(group_name: str, message_type: str, payload: dict): + with 
DjangoChannelNotify( + consumer="user.notification", + group_name=group_name, + message_type=message_type, + payload=payload, + ): + pass diff --git a/src/core/api/app/projects/tests.py b/src/core/api/app/projects/tests.py new file mode 100644 index 00000000..33a3a3a5 --- /dev/null +++ b/src/core/api/app/projects/tests.py @@ -0,0 +1,22 @@ +from django.test import TestCase +from factories import AccountFactory, ProjectFactory +from users.models import MAX_SLUG_LENGTH as ACCOUNT_MAX_SLUG_LENGTH + +from .models import MAX_SLUG_LENGTH as PROJECT_MAX_SLUG_LENGTH + + +class ProjectsTests(TestCase): + """ + Test Projects different scenarios: + """ + + def test_project_slug_truncates(self) -> None: + """ + Project slug should be truncated to MAX_SLUG_LENGTH + long_name is created with length > MAX_SLUG_LENGTH + """ + long_name = "test" * 8 + account = AccountFactory.create(name=long_name) + self.assertEqual(len(account.slug), ACCOUNT_MAX_SLUG_LENGTH) + project = ProjectFactory.create(name=long_name) + self.assertEqual(len(project.slug), PROJECT_MAX_SLUG_LENGTH) diff --git a/src/core/api/app/projects/urls.py b/src/core/api/app/projects/urls.py new file mode 100644 index 00000000..4210a347 --- /dev/null +++ b/src/core/api/app/projects/urls.py @@ -0,0 +1,5 @@ +from django.urls import path + +from . import views + +urlpatterns = [path("user-info", views.userinfo)] diff --git a/src/core/api/app/projects/views.py b/src/core/api/app/projects/views.py new file mode 100644 index 00000000..e1ba93c4 --- /dev/null +++ b/src/core/api/app/projects/views.py @@ -0,0 +1,681 @@ +import logging +import shlex +import subprocess + +import sentry_sdk +from clusters.adapters.all import get_default_values +from core.mixins.views import ( + AddAccountToContextMixin, + VerboseCreateModelMixin, + VerboseUpdateModelMixin, +) +from django.db.models import Q +from django.http import HttpResponse +from django.utils import timezone +from django_filters.rest_framework import DjangoFilterBackend +from iam.models import DatacovesToken +from iam.permissions import ( + AccountIsNotSuspended, + HasAccessToAccount, + HasResourcePermission, +) +from projects.tasks import check_profile_image_build +from rest_framework import filters, generics, status, views +from rest_framework.permissions import AllowAny, IsAuthenticated +from rest_framework.response import Response +from rest_framework.status import ( + HTTP_200_OK, + HTTP_204_NO_CONTENT, + HTTP_403_FORBIDDEN, + HTTP_404_NOT_FOUND, + HTTP_408_REQUEST_TIMEOUT, + HTTP_500_INTERNAL_SERVER_ERROR, +) + +from lib.airflow import ( + AirflowAPI, + ConfigIsMissingException, + is_secret_variable_name, + push_secrets_to_airflow, +) + +from .git import test_git_connection +from .models import ( + BlockedPodCreationRequest, + ConnectionTemplate, + ConnectionType, + Environment, + Profile, + ProfileImageSet, + Project, + ServiceCredential, + SSHKey, + SSLKey, + UserCredential, +) +from .permissions import ( + IsConnectionsAdminEnabled, + IsEnvironmentsAdminEnabled, + IsProfilesAdminEnabled, + IsProjectsAdminEnabled, + IsServiceCredentialsAdminEnabled, +) +from .runners import utils +from .serializers import ( + AccountSetupSerializer, + ConnectionTemplateSerializer, + ConnectionTypeSerializer, + DbConnectionSerializer, + EnvironmentKeysSerializer, + EnvironmentSerializer, + GitConnectionSerializer, + ProfileSerializer, + ProjectConnectionSerializer, + ProjectKeysSerializer, + ProjectSerializer, + ServiceCredentialSerializer, + ServiceSecretSerializer, +) + +logger = logging.getLogger(__name__) + + 
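The connection-test views below shell out to per-adapter runner scripts, passing the connection details as the base64 payload built by utils.get_connection_b64. The runner scripts are only partially shown in this diff; the following is a minimal sketch of the decoding side, assuming each run_on_<type>.py receives the payload as its first CLI argument (the load_connection helper name is illustrative, not part of this changeset):

import base64
import json
import sys


def load_connection(payload: str) -> dict:
    """Reverse of utils.get_connection_b64: base64 -> JSON -> dict."""
    return json.loads(base64.b64decode(payload).decode("utf-8"))


if __name__ == "__main__":
    connection = load_connection(sys.argv[1])
    # A real runner would open a connection with these details and signal the
    # outcome through its exit code (0 on success; the Snowflake runner shown
    # earlier exits 13 on auth/permission errors and 1 on unexpected errors).
    print(sorted(connection.keys()))
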
+class TestDBException(Exception): + pass + + +class TestDBForbiddenException(TestDBException): + pass + + +class ProjectMixin(AddAccountToContextMixin): + serializer_class = ProjectSerializer + permission_classes = [ + IsAuthenticated, + HasAccessToAccount, + HasResourcePermission, + IsProjectsAdminEnabled, + AccountIsNotSuspended, + ] + + def get_queryset(self): + return Project.objects.filter( + account__slug=self.kwargs["account_slug"] + ).order_by("name") + + +class ProjectList( + ProjectMixin, + VerboseCreateModelMixin, + generics.ListCreateAPIView, +): + """ + List all projects by account, or create a new project. + """ + + filter_backends = [filters.SearchFilter, DjangoFilterBackend] + search_fields = ["name"] + + +class ProjectDetail( + ProjectMixin, + VerboseUpdateModelMixin, + generics.RetrieveUpdateDestroyAPIView, +): + pass + + +class ProjectKeys( + ProjectMixin, + generics.RetrieveAPIView, + generics.CreateAPIView, + generics.DestroyAPIView, +): + serializer_class = ProjectKeysSerializer + + def destroy(self, request, account_slug, pk, token): + project = self.get_object() + + to_delete = DatacovesToken.objects.filter( + project=project, token_key=token + ).first() + + if to_delete: + to_delete.delete() + + return Response(status=status.HTTP_204_NO_CONTENT) + + +class EnvironmentMixin(AddAccountToContextMixin): + serializer_class = EnvironmentSerializer + permission_classes = [ + IsAuthenticated, + HasResourcePermission, + IsEnvironmentsAdminEnabled, + AccountIsNotSuspended, + ] + + def get_queryset(self): + return Environment.objects.filter( + project__account__slug=self.kwargs.get("account_slug") + ).order_by("project__name", "name") + + +class EnvironmentList( + EnvironmentMixin, + VerboseCreateModelMixin, + generics.ListCreateAPIView, +): + filter_backends = [DjangoFilterBackend] + filterset_fields = ["project"] + + +class EnvironmentDetail( + EnvironmentMixin, + VerboseUpdateModelMixin, + generics.RetrieveUpdateDestroyAPIView, +): + pass + + +class EnvironmentKeys( + EnvironmentMixin, + generics.RetrieveAPIView, + generics.CreateAPIView, + generics.DestroyAPIView, +): + serializer_class = EnvironmentKeysSerializer + + def destroy(self, request, account_slug, pk, token): + env = self.get_object() + + to_delete = DatacovesToken.objects.filter( + environment=env, token_key=token + ).first() + + if to_delete: + to_delete.delete() + + return Response(status=status.HTTP_204_NO_CONTENT) + + +class AdaptersDefaultValues(views.APIView): + permission_classes = [IsAuthenticated] + + def get(self, request): + return Response(data=get_default_values()) + + +class TestDbConnection(views.APIView): + permission_classes = [IsAuthenticated] + + @staticmethod + def _prepare_connection_data(serializer): + user_cred = None + service_cred = None + user_credential_id = serializer.validated_data.get("user_credential_id") + service_credential_id = serializer.validated_data.get("service_credential_id") + + if user_credential_id: + user_cred = UserCredential.objects.get(id=user_credential_id) + type = user_cred.connection_template.type_slug + conn_data = user_cred.combined_connection() + ssl_key_id = user_cred.ssl_key_id + elif service_credential_id: + service_cred = ServiceCredential.objects.get(id=service_credential_id) + type = service_cred.connection_template.type_slug + conn_data = service_cred.combined_connection() + ssl_key_id = service_cred.ssl_key_id + else: + type = serializer.validated_data.get("type") + conn_data = serializer.validated_data["connection"] + ssl_key_id = 
serializer.validated_data.get("ssl_key_id") + + if ssl_key_id: + conn_data["private_key"] = SSLKey.objects.get(id=ssl_key_id).private + + return conn_data, type, user_cred, service_cred + + def _run_connection_test(self, cmd_list, conn_type): + """ + Run a connection test. + """ + try: + subprocess.check_output(cmd_list, timeout=30) + return {"message": "Connection successful!"}, HTTP_200_OK + except subprocess.CalledProcessError as e: + stderr = e.output.decode("utf-8") + if e.returncode == 13: + return { + "error_message": f"Connection test failed: {stderr}" + }, HTTP_403_FORBIDDEN + elif e.returncode == 1: + logger.debug(stderr) + sentry_sdk.capture_message( + f"{conn_type.capitalize()} adapter connection error: {stderr}" + ) + return ( + { + "error_message": "Unexpected error found while testing database \ + connection. We've been notified. Try again later" + }, + HTTP_500_INTERNAL_SERVER_ERROR, + ) + + except subprocess.TimeoutExpired: + return { + "error_message": "Connection test timed out. Please check host." + }, HTTP_408_REQUEST_TIMEOUT + + def post(self, request): + serializer = DbConnectionSerializer(data=request.data) + if serializer.is_valid(raise_exception=True): + conn_data, type, user_cred, service_cred = self._prepare_connection_data( + serializer + ) + conn_data_bytes = utils.get_connection_b64(conn_data) + + logger.debug("Run test connection type [%s]", type) + cmd_list = shlex.split( + f"/bin/bash -c 'source ${utils.SQL_RUNNERS_VIRTUALENVS[type]}/bin/activate && python\ + projects/runners/run_on_{type}.py {conn_data_bytes}'" + ) + + data, status = self._run_connection_test(cmd_list, type) + validated_at = timezone.now() if status == HTTP_200_OK else None + if user_cred: + user_cred.validated_at = validated_at + user_cred.save() + elif service_cred: + service_cred.validated_at = validated_at + service_cred.save() + return Response(data=data, status=status) + + +class GenerateSSHKey(views.APIView): + permission_classes = [IsAuthenticated] + + def get(self, request): + usage = self.request.query_params.get("usage") + ssh_key_type = self.request.query_params.get("ssh_key_type") + + ssh_key = None + + if usage == SSHKey.USAGE_USER: + ssh_key = ( + SSHKey.objects.filter( + created_by=request.user, + usage=usage, + generated=True, + key_type=ssh_key_type, + ) + .order_by("-created_at") + .first() + ) + + if not ssh_key: + ssh_key = SSHKey.objects.new( + created_by=request.user, + usage=usage, + key_type=ssh_key_type, + ) + + return Response( + data={"id": ssh_key.id, "ssh_key": ssh_key.public}, status=HTTP_200_OK + ) + + +class GenerateSSLKey(views.APIView): + permission_classes = [IsAuthenticated] + + def get(self, request): + format = self.request.query_params.get("key_format") + ssl_key = SSLKey.objects.new( + created_by=request.user, + usage="project", + format=format, + ) + + return Response( + data={"id": ssl_key.id, "ssl_key": ssl_key.public}, status=HTTP_200_OK + ) + + +class TestGitConnection(views.APIView): + permission_classes = [IsAuthenticated] + + def post(self, request): + serializer = GitConnectionSerializer(data=request.data) + + if serializer.is_valid(raise_exception=True): + data = serializer.validated_data + return test_git_connection(data) + + +class AccountSetup(VerboseCreateModelMixin, generics.CreateAPIView): + serializer_class = AccountSetupSerializer + permission_classes = [IsAuthenticated] + + +class ConnectionTemplateMixin(AddAccountToContextMixin): + serializer_class = ConnectionTemplateSerializer + permission_classes = [ + IsAuthenticated, + 
HasResourcePermission, + IsConnectionsAdminEnabled, + AccountIsNotSuspended, + ] + + def get_queryset(self): + return ConnectionTemplate.objects.filter( + project__account__slug=self.kwargs.get("account_slug") + ).order_by("project__name", "name") + + +class ConnectionTemplateList( + ConnectionTemplateMixin, + VerboseCreateModelMixin, + generics.ListCreateAPIView, +): + filter_backends = [DjangoFilterBackend] + filterset_fields = ["project"] + + +class ConnectionTemplateDetail( + ConnectionTemplateMixin, + VerboseUpdateModelMixin, + generics.RetrieveUpdateDestroyAPIView, +): + pass + + +class ProjectConnectionTemplateList(generics.ListAPIView): + """Used to populate connection dropdowns""" + + serializer_class = ProjectConnectionSerializer + permission_classes = [IsAuthenticated, HasAccessToAccount] + filter_backends = [DjangoFilterBackend] + filterset_fields = ["project", "for_users"] + + def get_queryset(self): + return ConnectionTemplate.objects.filter( + project__account__slug=self.kwargs["account_slug"] + ).order_by("name") + + +class ConnectionTypeList(generics.ListAPIView): + """Used to populate connection type dropdowns""" + + serializer_class = ConnectionTypeSerializer + permission_classes = [IsAuthenticated, HasAccessToAccount] + + def get_queryset(self): + return ConnectionType.objects.filter( + Q(account__isnull=True) | Q(account__slug=self.kwargs["account_slug"]) + ).order_by("name") + + +class ServiceCredentialMixin: + serializer_class = ServiceCredentialSerializer + permission_classes = [ + IsAuthenticated, + HasResourcePermission, + IsServiceCredentialsAdminEnabled, + AccountIsNotSuspended, + ] + + def get_queryset(self): + account_slug = self.kwargs["account_slug"] + return ServiceCredential.objects.filter( + connection_template__project__account__slug=account_slug, + ).order_by("environment__name", "service", "name") + + +class ServiceSecretList( + ServiceCredentialMixin, VerboseCreateModelMixin, generics.ListAPIView +): + """The difference with ServiceCredentialList is that this will return revealed passwords""" + + serializer_class = ServiceSecretSerializer + filter_backends = [DjangoFilterBackend] + filterset_fields = ["service", "name", "environment"] + + +class ServiceCredentialList( + ServiceCredentialMixin, VerboseCreateModelMixin, generics.ListCreateAPIView +): + filter_backends = [DjangoFilterBackend] + filterset_fields = ["service", "name", "environment"] + + +class ServiceCredentialDetail( + ServiceCredentialMixin, + VerboseUpdateModelMixin, + generics.RetrieveUpdateDestroyAPIView, +): + pass + + +class AdmissionWebHookApiView(views.APIView): + permission_classes = [AllowAny] + + def post(self, request, *args, **kwargs): + obj = BlockedPodCreationRequest() + obj.set_request(request.data) + allowed = obj.is_allowed_to_run() + data = { + "apiVersion": "admission.k8s.io/v1", + "kind": "AdmissionReview", + "response": { + "uid": obj.request_uid, + "allowed": allowed, + }, + } + if not allowed: + data["response"]["status"] = { + "code": 403, + "message": "You exceeded the workers time execution in your account", + } + obj.response = data + obj.save() + return Response(data=data, status=status.HTTP_200_OK) + + +class ProfileMixin(AddAccountToContextMixin): + serializer_class = ProfileSerializer + permission_classes = [ + IsAuthenticated, + HasResourcePermission, + IsProfilesAdminEnabled, + ] + + def get_queryset(self): + return Profile.objects.filter( + account__slug=self.kwargs.get("account_slug") + ).order_by("name") + + +class ProfileList(ProfileMixin, 
VerboseCreateModelMixin, generics.ListCreateAPIView): + filter_backends = [filters.SearchFilter, DjangoFilterBackend] + search_fields = ["name"] + + +class ProfileDetail( + ProfileMixin, + VerboseUpdateModelMixin, + generics.RetrieveUpdateDestroyAPIView, +): + pass + + +class ProfileImageSetHook(generics.CreateAPIView): + """Hook to update the status of the Profile Image Set when it has finished building""" + + permission_classes = [IsAuthenticated] + queryset = ProfileImageSet.objects.all() + + def create(self, request, pk=None): + try: + cluster_id = request.data["cluster_id"] + image_tag = request.data["image_tag"] + build_id = request.data["build_id"] + logs = request.data["logs"] + image_set: ProfileImageSet = self.get_object() + check_profile_image_build.apply_async( + ( + image_set.id, + cluster_id, + image_tag, + build_id, + logs, + ), + countdown=10, + ) + return Response( + { + "message": "Profile image set received.", + "profile_image_set_id": image_set.id, + "build_id": build_id, + "image_tag": image_tag, + } + ) + + except ProfileImageSet.DoesNotExist: + return Response(status=status.HTTP_404_NOT_FOUND) + + +def dynamic_repo_credentials(request, uid): + """'request' must be from within Kubernetes""" + + project = Project.objects.filter(uid=uid).first() + + if project is None: + return HttpResponse(status=HTTP_404_NOT_FOUND) + + # This only works for Azure right now + if project.clone_strategy not in ( + project.AZURE_SECRET_CLONE_STRATEGY, + project.AZURE_CERTIFICATE_CLONE_STRATEGY, + ): + return HttpResponse(status=HTTP_404_NOT_FOUND) + + project.update_oauth_if_needed() + + return HttpResponse( + f"username={project.deploy_credentials['oauth_username']}\n" + f"password={project.deploy_credentials['oauth_password']}\n", + headers={ + "Content-Type": "text/plain", + }, + ) + + +def push_secrets_variable_to_airflow(request, slug): + """It will dial back into the airflow for 'slug' and try to inject + Datacoves' API key for secrets manager as a connection. + """ + + env = Environment.objects.filter(slug=slug).select_related("project").first() + + if env is None: + return HttpResponse(status=HTTP_404_NOT_FOUND) + + try: + push_secrets_to_airflow(env) + except Exception as e: + logger.error(e) + return HttpResponse(status=HTTP_500_INTERNAL_SERVER_ERROR) + + return HttpResponse(status=HTTP_204_NO_CONTENT) + + +class TeamAirflowSecretFetchView(views.APIView): + permission_classes = [IsAuthenticated] + + def get(self, request, slug: str): + """This fetches secrets for a given environment's team airflow (slug) + and retrieves an appropriately redacted set for the user's local airflow. + The user should use their API_TOKEN from the DATACOVES__API_TOKEN + variable. + + The returned structure is: + + { + "variables": [ + { + "key": "key", + "value": "value", + "description": "description", + "is_redacted": True/False + }, + ... + ], + "connections": [ + { + "connection_id": "conn_id", + "conn_type": "conn_type", + "description": "description", + "host": "host", + "schema": "schema", + "login": "login", + "password": None, - This will never be set + "port": 1234, + "extra": "string", + "is_redacted": True - this will always be true + }, + ... + ] + } + """ + + # This restricts to environments the user has access to. + env = request.user.environments.filter(slug=slug).first() + + if env is None: + return HttpResponse(status=HTTP_404_NOT_FOUND) + + # Can the user use local airflow? 
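+ # Redaction rules applied below: (1) return 403 unless the user may run a local
+ # Airflow against this environment, (2) mark every connection is_redacted and blank
+ # its `extra` field since it often carries secrets, (3) null out variable values whose
+ # keys look secret and skip the datacoves-* secret keys entirely, and (4) translate a
+ # missing Airflow config into 404 and any other failure into a logged 500.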
+ if not request.user.can_use_local_airflow(env): + return HttpResponse(status=HTTP_403_FORBIDDEN) + + # Grab an API instance + try: + api = AirflowAPI.for_environment_service_user(env) + + return Response( + { + "connections": [ + dict( + api.get_connection(x["connection_id"]), + is_redacted=True, + extra={}, # extra often has secrets in it + ) + for x in api.get_connections() + ], + "variables": [ + { + "key": x["key"], + "value": None + if is_secret_variable_name(x["key"]) + else x["value"], + "description": x["description"], + "is_redacted": is_secret_variable_name(x["key"]), + } + for x in api.get_variables() + if x["key"] + not in ( + "datacoves-primary-secret", + "datacoves-secondary-secret", + "datacoves-dbt-api-secret", + ) + ], + } + ) + except ConfigIsMissingException as e: + return HttpResponse(status=HTTP_404_NOT_FOUND, content=str(e)) + + except Exception as e: + logger.error(e) + return HttpResponse( + status=HTTP_500_INTERNAL_SERVER_ERROR, content="Unexpected error." + ) diff --git a/src/core/api/app/pytest.ini b/src/core/api/app/pytest.ini new file mode 100644 index 00000000..71f9a5d3 --- /dev/null +++ b/src/core/api/app/pytest.ini @@ -0,0 +1,9 @@ +[pytest] +DJANGO_SETTINGS_MODULE=datacoves.integration_tests_settings +transactional=True +asyncio_mode=auto +asyncio_default_fixture_loop_scope=function,class,module,package,session +addopts=-p no:warnings +filterwarnings = + ignore:.*U.*mode is deprecated:DeprecationWarning + ignore:.*Django now detects this configuration.*:django.utils.deprecation.RemovedInDjango41Warning \ No newline at end of file diff --git a/src/core/api/app/run.sh b/src/core/api/app/run.sh new file mode 100755 index 00000000..c5c3b3bf --- /dev/null +++ b/src/core/api/app/run.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +if [ "$1" == "worker" ]; then + + queue="$2" + echo "Starting in Worker Mode (queue $queue)" + # FIXME: Added --without-mingle --without-gossip bec of an issue on celery/redis + # https://github.com/celery/celery/discussions/7276 + WORKER=1 exec su abc -c "celery -A datacoves worker -Q $queue -l INFO -E --without-mingle --without-gossip" + +elif [ "$1" == "worker-reload" ]; then + + queue="$2" + echo "Starting in Worker Reload Mode (queue $queue)" + exec ./manage.py runcelery "$queue" + +elif [ "$1" == "beat" ]; then + + echo "Starting in Beat Mode" + WORKER=1 exec su abc -c "celery -A datacoves beat -l INFO --scheduler django_celery_beat.schedulers:DatabaseScheduler" + +elif [ "$1" == "flower" ]; then + + echo "Starting in Flower Mode" + exec celery -A datacoves flower --basic_auth=${FLOWER_USERNAME}:${FLOWER_PASSWORD} + +elif [ "$1" == "local" ]; then + + echo "Starting in Local Mode" + exec tail -f /dev/null + +elif [ "$1" == "dev" ]; then + + echo "Starting in Dev Mode with user 'abc'" + su abc -c "/usr/src/app/manage.py runserver 0:8000" + +else + + echo "Starting in Web Server Mode" + # FIXME: Move migration script to a k8s job if replicas need to be greater than 1 + python manage.py collectstatic --noinput && \ + # exec uwsgi --yaml ../uwsgi.yaml + exec daphne -b 0.0.0.0 -p 8000 datacoves.asgi:application + +fi diff --git a/src/core/api/app/scripts/001_migrate_permissions_from_sysadmin_to_admin.py b/src/core/api/app/scripts/001_migrate_permissions_from_sysadmin_to_admin.py new file mode 100644 index 00000000..cec2832d --- /dev/null +++ b/src/core/api/app/scripts/001_migrate_permissions_from_sysadmin_to_admin.py @@ -0,0 +1,127 @@ +# ./manage.py runscript 001_migrate_permissions_from_sysadmin_to_admin + +import logging + +from clusters.tasks 
import setup_airflow_roles +from django.conf import settings +from django.contrib.auth.models import Group, Permission +from django.db import transaction +from projects.models import Environment, Project +from users.models import ExtendedGroup, User + +logger = logging.getLogger(__name__) + + +def run(): + with transaction.atomic(): + # Step 1: Fixg extended groups + logger.info("1. Fixing extended groups") + logger.info("--------------------------------") + extended_groups = ExtendedGroup.objects.all() + for ex_group in extended_groups: + if ex_group.environment: + ex_group.project = ex_group.environment.project + ex_group.save() + + # Step 2: Create permissions and groups for projects + logger.info("2. Creating projects permissions") + logger.info("--------------------------------") + projects = Project.objects.all() + for project in projects: + logger.info( + f"Creating permissions for project: {project.name} ({project.slug})" + ) + project.create_permissions() + project.create_project_groups(force_update=True) + + # Step 3: Create permissions and groups for environments + logger.info("3. Creating environments permissions") + logger.info("------------------------------------") + environments = Environment.objects.all() + for env in environments: + logger.info( + f"Creating permissions for environment: {env.name} ({env.slug})" + ) + env.create_permissions() + env.create_environment_groups(force_update=True) + + # Step 4: Migrate users from sysadmin groups to admin groups + logger.info("4. Migrating groups for users") + logger.info("-----------------------------") + groups = Group.objects.all() + users = User.objects.all() + sysadmin_groups = ["project sys admins", "environment sys admins"] + for user in users: + for group in user.groups.all(): + if any(sys_group in group.name for sys_group in sysadmin_groups): + # Find corresponding admin group by removing " sys" from the group name + new_group_name = group.name.replace(" sys admins", " admins") + new_group = groups.filter(name=new_group_name).first() + + if new_group: + logger.info( + f"User {user.email} moved from {group.name} to {new_group.name}" + ) + user.groups.remove(group) + user.groups.add(new_group) + + # Step 5: Migrate permission from sysadmin groups to admin groups + logger.info("5. Migrating groups sysadmin to admin") + logger.info("-------------------------------------") + permissions = Permission.objects.all() + for group in groups.filter(name__contains="sys admins"): + for perm in group.permissions.all(): + if "admin" in perm.name: + new_perm = permissions.filter( + name=perm.name.replace("admin", "sysadmin") + ).first() + if new_perm: + logger.info( + f"Group {group.name} permission moved from {perm.name} to {new_perm.name}" + ) + group.permissions.remove(perm) + group.permissions.add(new_perm) + + if "datahub:admin" in perm.name: + new_perm = permissions.filter( + name=perm.name.replace("admin", "data") + ).first() + if new_perm: + logger.info( + f"Group {group.name} permission moved from {perm.name} to {new_perm.name}" + ) + group.permissions.remove(perm) + group.permissions.add(new_perm) + + # Step 6: Fix service account users. + logger.info("6. 
Fixing service account permissions") + logger.info("-------------------------------------") + users = User.objects.filter(is_service_account=True) + + for sa_user in users: + env = Environment.objects.filter( + slug=sa_user.email.split("@")[0].split("-")[1] + ).first() + + if not env: + continue + + for perm in [ + f"{env.slug}|workbench:{settings.SERVICE_AIRFLOW}|{settings.ACTION_WRITE}", + f"{env.slug}|workbench:{settings.SERVICE_AIRFLOW}:admin|{settings.ACTION_WRITE}", + ]: + airflow_permission = Permission.objects.get(name__contains=perm) + sa_user.user_permissions.add(airflow_permission) + + # Step 7: Creating new roles in Airflow + logger.info("7. Creating new roles in Airflow") + logger.info("--------------------------------") + for env in Environment.objects.all(): + if env.is_service_enabled_and_valid(settings.SERVICE_AIRFLOW): + if env.airflow_config.get("api_enabled"): + try: + setup_airflow_roles(env_slug=env.slug) + except Exception as e: + logger.error(f"Error setting up roles for {env.slug}: {e}") + + logger.info("Migration done!") diff --git a/src/core/api/app/scripts/002_update_airflow_config.py b/src/core/api/app/scripts/002_update_airflow_config.py new file mode 100644 index 00000000..6eec3e11 --- /dev/null +++ b/src/core/api/app/scripts/002_update_airflow_config.py @@ -0,0 +1,46 @@ +# ./manage.py runscript 002_update_airflow_config + +import logging + +from django.db import transaction +from projects.models import Environment + +logger = logging.getLogger(__name__) + + +def run(): + with transaction.atomic(): + # Step 1: Create permissions and groups for projects + logger.info("Updating Airflow configuration") + environments = Environment.objects.only("airflow_config") + # We change these properties to custom_envs + props_to_delete = [ + { + "name": "default_task_retries", + "env": "AIRFLOW__CORE__DEFAULT_TASK_RETRIES", + }, + {"name": "worker_pods_pending_timeout", "env": None}, + {"name": "settings", "env": None}, + ] + for env in environments: + logger.info("---------------------------------") + logger.info(f"Environment: {env.name} ({env.slug})") + custom_envs = {} + for prop in props_to_delete: + prop_name = prop["name"] + prop_env = prop["env"] + logger.info(f"Deleting property: {prop_name}") + if prop_name in env.airflow_config: + if prop_env is not None: + custom_envs[prop_env] = env.airflow_config[prop_name] + + del env.airflow_config[prop_name] + logger.info(f'"{prop_name}" deleted.') + + else: + logger.info(f'Property "{prop_name}" does not exist.') + + env.airflow_config["custom_envs"] = custom_envs + env.save() + + logger.info("Update done!") diff --git a/src/core/api/app/scripts/003_update_environment_secrets.py b/src/core/api/app/scripts/003_update_environment_secrets.py new file mode 100644 index 00000000..095bbf4f --- /dev/null +++ b/src/core/api/app/scripts/003_update_environment_secrets.py @@ -0,0 +1,27 @@ +# ./manage.py runscript 002_update_environment_secrets + +import logging + +from django.db import transaction +from projects.models import Environment + +logger = logging.getLogger(__name__) + + +def run(): + with transaction.atomic(): + logger.info("Updating Environment configuration") + environments = Environment.objects.only("airflow_config") + + for env in environments: + logger.info("---------------------------------") + logger.info(f"Environment: {env.name} ({env.slug})") + + if env.airflow_config.get("secrets_backend_enabled", True) is not False: + logger.info("Had to disable secrets backend") + env.airflow_config["secrets_backend_enabled"] 
= False + env.save() + else: + logger.info("Secrets backend already disabled, no action") + + logger.info("Update done!") diff --git a/src/core/api/app/scripts/README.md b/src/core/api/app/scripts/README.md new file mode 100644 index 00000000..fd537863 --- /dev/null +++ b/src/core/api/app/scripts/README.md @@ -0,0 +1,9 @@ +# Script + +This package contains scripts that can be executed as Django commands. + +```bash +./manage.py runscript migrate_permissions_from_sysadmin_to_admin +``` + +[More information](https://django-extensions.readthedocs.io/en/latest/runscript.html) \ No newline at end of file diff --git a/src/core/api/app/scripts/__init__.py b/src/core/api/app/scripts/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/scripts/check_permissions_by_service.py b/src/core/api/app/scripts/check_permissions_by_service.py new file mode 100644 index 00000000..803651f0 --- /dev/null +++ b/src/core/api/app/scripts/check_permissions_by_service.py @@ -0,0 +1,293 @@ +""" +Examples: + - ./manage.py runscript check_permissions_by_service + - ./manage.py runscript check_permissions_by_service --script-args save-to-file +""" + +import csv + +from django.conf import settings +from django.db.models import Q +from projects.models import Environment, Project +from rich.console import Console +from rich.table import Table +from users.models import Account +from users.models import Permission as PermissionModel +from users.models.permission import parse_permission_name + +SAVE_TO_FILES = False + + +def run(*args): + if "save-to-file" in args: + global SAVE_TO_FILES + SAVE_TO_FILES = True + + env = Environment.objects.first() + # env.create_environment_groups(force_update=True) + # env.project.create_project_groups(force_update=True) + # environment_groups(env) + # project_groups(env.project) + # account_groups(env.project.account) + service_resource_permissions(env) + + +def print_or_save_table(table: Table, filename: str): + if SAVE_TO_FILES: + console = Console(file=open(f"scripts/{filename}", "w")) + else: + console = Console() + console.print(table) + + +def service_resource_permissions(env: Environment = None): + def get_service_map(service): + if service == settings.SERVICE_DATAHUB: + if res in ["*|write", "admin|write"]: + return "Admin" + elif res == "data|write": + return "Editor" + else: + return "Reader" + + elif service == settings.SERVICE_AIRFLOW: + if res in ["*|write", "security|write"]: + return "Admin" + elif res == "admin|write": + return "Op" + elif res == "sysadmin|write": + return "SysAdmin" + elif res == "dags|write": + return "User" + else: + return "Viewer" + + elif service == settings.SERVICE_SUPERSET: + if res in ["*|write", "security|write"]: + return "Admin" + elif res == "data-sources|write": + return "Alpha" + else: + return "Gamma" + + elif service == settings.INTERNAL_SERVICE_GRAFANA: + if res in "*|write": + return "GrafanaAdmin" + elif res in "configuration|write": + return "Admin" + elif res in "dashboards|write": + return "Editor" + elif res in "dashboards|read": + return "Viewer" + return "????" + + else: + return "???" 
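+ # Note: get_service_map reads `res` (the "resource|action" string built in the loop
+ # further below) from the enclosing scope rather than taking it as a parameter.
+ # Example mappings: Airflow "admin|write" -> Op and "dags|write" -> User, Superset
+ # "data-sources|write" -> Alpha, DataHub "data|write" -> Editor, and Grafana
+ # "dashboards|read" -> Viewer.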
+ + table = Table(title="Mapping services and permissions") + table.add_column("Services", style="cyan") + table.add_column("Permissions", style="magenta") + table.add_column("Datacoves Role", style="magenta") + table.add_column("Service Map", style="magenta") + table.add_column("Service Role", style="magenta") + + csv_data = [] + permissions_all = PermissionModel.objects.all() + for service in settings.SERVICES + settings.INTERNAL_SERVICES: + project_slug = env.project.slug + env_slug = env.slug + permissions = permissions_all.filter( + Q(name__icontains=f"{project_slug}|workbench:{service}") + | Q(name__icontains=f"{env_slug}|workbench:{service}") + | Q(name__icontains=f"|services:{service}") + ) + + data = [] + for permission in permissions: + groups = [] + for group in permission.group_set.all(): + groups.append( + group.name.replace(env_slug, "").replace( + project_slug, "" + ) + ) + + name = parse_permission_name(permission) + action = name["action"] + resource = name["resource"].split(":")[2:] + res = f"{resource[0] if resource else '*'}|{action}" + row = [ + service, + permission.name.replace(env_slug, "").replace( + project_slug, "" + ), + "|".join(groups), + res, + get_service_map(service), + ] + csv_data.append(row) + data.append(row) + + data.sort(key=lambda x: (x[0], x[3])) + for row in data: + table.add_row(row[0], row[1], "\n".join(row[2].split("|")), row[3], row[4]) + + if SAVE_TO_FILES: + with open( + "scripts/service_mapping_permissions.csv", "w", newline="" + ) as archivo: + writer = csv.writer(archivo) + for fila in csv_data: + writer.writerow(fila) + + print_or_save_table(table=table, filename="service_mapping_permissions_table.txt") + + +class Group: + def __init__(self, name): + self.name = name + self._permissions = [] + + def add_permission(self, permission): + self._permissions.append(permission) + + @property + def permissions(self): + return "\n".join(self._permissions) + + +class Service: + def __init__(self, name): + self.name = name + self._groups = [] + + def add_group(self, group_name): + g = list(filter(lambda g: g.name == group_name, self._groups)) + if g: + return g[0] + group = Group(group_name) + self._groups.append(group) + return group + + @property + def groups(self): + return "\n".join( + [ + f"Group: {g.name}:\n---------------------------------\n{g.permissions}\n" + for g in self._groups + ] + ) + + @staticmethod + def get_service(service_name, services): + service = list(filter(lambda s: s.name == service_name, services)) + if service: + return service[0] + + service = Service(service_name) + services.append(service) + return service + + +def environment_groups(env: Environment): + table = Table(title="Groups by Environment") + table.add_column("Services", style="cyan") + table.add_column("Groups and Permissions", style="magenta") + + services = [] + for service_name in settings.SERVICES: + Service.get_service(service_name, services) + + for _, permissions, group_name in env.roles_and_permissions: + permission_filter = Q(name__endswith=permissions[0]) + for permission in permissions[1:]: + permission_filter |= Q(name__endswith=permission) + + permissions_to_add = env.environment_level_permissions.filter( + permission_filter + ).order_by("name") + + for p in permissions_to_add: + service = list(filter(lambda s: s in p.name, settings.SERVICES)) + service_name = service[0] + service = Service.get_service(service_name, services) + group = service.add_group(group_name) + group.add_permission(p.name.replace(env.slug, "")) + + csv_data = [] + for 
service in services: + table.add_row(service.name, service.groups) + + for group in service._groups: + for permission in group._permissions: + csv_data.append([service.name, group.name, permission]) + + if SAVE_TO_FILES: + with open("scripts/groups_by_environment.csv", "w", newline="") as archivo: + writer = csv.writer(archivo) + for fila in csv_data: + writer.writerow(fila) + + print_or_save_table(table=table, filename="groups_by_environment_table.txt") + + +def project_groups(project: Project): + table = Table(title="Groups by Project") + table.add_column("Services", style="cyan") + table.add_column("Groups and Permissions", style="magenta") + + services = [] + for service_name in settings.SERVICES: + Service.get_service(service_name, services) + + for _, permissions, group_name in project.roles_and_permissions: + permission_filter = Q(name__endswith=permissions[0]) + for permission in permissions[1:]: + permission_filter |= Q(name__endswith=permission) + + permissions_to_add = project.project_level_permissions.filter(permission_filter) + + for p in permissions_to_add: + service = list(filter(lambda s: s in p.name, settings.SERVICES)) + service_name = service[0] + service = Service.get_service(service_name, services) + group = service.add_group(group_name) + group.add_permission(p.name.replace(project.slug, "")) + + csv_data = [] + for service in services: + table.add_row(service.name, service.groups) + + for group in service._groups: + for permission in group._permissions: + csv_data.append([service.name, group.name, permission]) + + if SAVE_TO_FILES: + with open("scripts/groups_by_project.csv", "w", newline="") as archivo: + writer = csv.writer(archivo) + for fila in csv_data: + writer.writerow(fila) + + print_or_save_table(table=table, filename="groups_by_project_table.txt") + + +def account_groups(account: Account): + table = Table(title="Groups by Account") + table.add_column("Groups", style="cyan") + table.add_column("Permissions", style="magenta") + + table.add_row("account default", "") + + csv_data = [] + for permission in account.account_level_permissions: + group_name = "account admins" + table.add_row(group_name, permission.name) + csv_data.append([group_name, permission]) + + if SAVE_TO_FILES: + with open("scripts/groups_by_account.csv", "w", newline="") as archivo: + writer = csv.writer(archivo) + for fila in csv_data: + writer.writerow(fila) + + print_or_save_table(table=table, filename="groups_by_account_table.txt") diff --git a/src/core/api/app/users/__init__.py b/src/core/api/app/users/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/users/admin.py b/src/core/api/app/users/admin.py new file mode 100644 index 00000000..90deb256 --- /dev/null +++ b/src/core/api/app/users/admin.py @@ -0,0 +1,303 @@ +import operator +from functools import reduce + +from billing.models import Credit +from django.conf import settings +from django.contrib import admin, messages +from django.contrib.auth.admin import UserAdmin as BaseUserAdmin +from django.contrib.auth.models import Permission +from django.db import models +from django.db.models import Count, Max, Q +from django_json_widget.widgets import JSONEditorWidget +from django_object_actions import DjangoObjectActions + +from datacoves.admin import ( + BaseModelAdmin, + DeactivatedDateFilter, + DefaultNoBooleanFilter, +) + +from .models import Account, ExtendedGroup, User + + +class ProjectFilter(admin.SimpleListFilter): + title = "Project" + parameter_name = "project__id" + + def lookups(self, 
request, model_admin): + filter_names = ( + "account__id", + "environment__id", + ) + filter_clauses = [ + Q((filter, request.GET[filter])) + for filter in filter_names + if request.GET.get(filter) + ] + + if filter_clauses: + projects = set( + [ + ext_group.project + for ext_group in model_admin.model.objects.all() + .filter(reduce(operator.and_, filter_clauses)) + .filter(project__isnull=False) + ] + ) + else: + projects = set( + [ + ext_group.project + for ext_group in model_admin.model.objects.filter( + project__isnull=False + ) + ] + ) + return [(p.id, p.name) for p in sorted(projects, key=lambda p: p.name)] + + def queryset(self, request, queryset): + if self.value(): + return queryset.filter(project__id__exact=self.value()) + + +class AccountFilter(admin.SimpleListFilter): + title = "Account" + parameter_name = "account__id" + + def lookups(self, request, model_admin): + filter_names = ( + "environment__id", + "project__id", + ) + filter_clauses = [ + Q((filter, request.GET[filter])) + for filter in filter_names + if request.GET.get(filter) + ] + + if filter_clauses: + accounts = set( + [ + ext_group.account + for ext_group in model_admin.model.objects.all() + .filter(reduce(operator.and_, filter_clauses)) + .filter(account__isnull=False) + ] + ) + else: + accounts = set( + [ + ext_group.account + for ext_group in model_admin.model.objects.filter( + account__isnull=False + ) + ] + ) + return [(a.id, a.name) for a in sorted(accounts, key=lambda a: a.name)] + + def queryset(self, request, queryset): + if self.value(): + return queryset.filter(project__account__id__exact=self.value()) + + +class EnvironmentFilter(admin.SimpleListFilter): + title = "Environment" + parameter_name = "environment__id" + + def lookups(self, request, model_admin): + filter_names = ( + "account__id", + "project__id", + ) + filter_clauses = [ + Q((filter, request.GET[filter])) + for filter in filter_names + if request.GET.get(filter) + ] + + if filter_clauses: + envs = set( + [ + ext_group.environment + for ext_group in model_admin.model.objects.all() + .filter(reduce(operator.and_, filter_clauses)) + .filter(environment__isnull=False) + ] + ) + else: + envs = set( + [ + ext_group.environment + for ext_group in model_admin.model.objects.filter( + environment__isnull=False + ) + ] + ) + return [(env.id, env.slug) for env in sorted(envs, key=lambda env: env.slug)] + + def queryset(self, request, queryset): + if self.value(): + return queryset.filter(environment__id__exact=self.value()) + + +class RoleFilter(admin.SimpleListFilter): + title = "Role" + parameter_name = "role__id" + + def lookups(self, request, model_admin): + return [ + ("developer", "Developer"), + ("admin", "Admin"), + ("sysadmin", "SysAdmin"), + ("viewer", "Viewer"), + ] + + def queryset(self, request, queryset): + val = self.value() + role = None + + if val == "developer": + role = f"|workbench:{settings.SERVICE_CODE_SERVER}" + elif val == "admin": + role = ":admin" + elif val == "sysadmin": + role = ":sysadmin" + elif val == "viewer": + role = "|read" + + if role: + return queryset.filter(groups__permissions__name__contains=role).distinct() + + return queryset + + +@admin.register(User) +class UserAdmin(BaseModelAdmin, BaseUserAdmin): + fieldsets = ( + (None, {"fields": ("email", "password", "settings")}), + ("Personal info", {"fields": ("name", "avatar")}), + ( + "Permissions", + { + "fields": ( + "deactivated_at", + "is_superuser", + "groups", + "user_permissions", + "setup_enabled", + ), + }, + ), + ("Important dates", {"fields": 
("last_login",)}), + ) + add_fieldsets = ( + ( + None, + { + "classes": ("wide",), + "fields": ("email", "password1", "password2"), + }, + ), + ) + list_display = ("name", "email", "slug", "is_superuser", "is_service_account") + list_filter = ( + "is_superuser", + ("deactivated_at", DeactivatedDateFilter), + "groups", + "groups__extended_group__account", + "groups__extended_group__project", + "groups__extended_group__environment", + ("is_service_account", DefaultNoBooleanFilter), + (RoleFilter), + ) + search_fields = ("name", "email") + ordering = ("email",) + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + filter_horizontal = ( + "groups", + "user_permissions", + ) + + +class CreditInline(BaseModelAdmin, admin.TabularInline): + model = Credit + extra = 0 + + +@admin.register(Account) +class AccountAdmin(BaseModelAdmin, DjangoObjectActions, admin.ModelAdmin): + def create_permissions(self, request, obj): + obj.create_permissions() + obj.create_account_groups() + messages.add_message( + request, + messages.INFO, + "Project default groups and permissions successfully created.", + ) + + def user_environments(self, obj): + return obj.user_environments + + def last_activity_at(self, obj): + return obj.last_activity_at + + def active_developers(self, obj): + return obj.developers.count() + + def get_queryset(self, request): + qs = ( + super() + .get_queryset(request) + .annotate( + user_environments=Count("projects__environments__user_environments"), + last_activity_at=Max( + "projects__environments__user_environments__heartbeat_at" + ), + ) + ) + return qs + + create_permissions.label = "Create Permissions" + create_permissions.short_description = "Create missing permissions for this account" + change_actions = ("create_permissions",) + + inlines = [CreditInline] + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + readonly_fields = ( + "slug", + "airflow_workers_minutes_sum", + "airbyte_workers_minutes_sum", + "current_cycle_start", + ) + list_display = ( + "name", + "slug", + "plan", + "variant", + "customer_id", + "user_environments", + "last_activity_at", + "active_developers", + ) + list_filter = ("plan", ("deactivated_at", DeactivatedDateFilter)) + search_fields = ("name",) + ordering = ("name", "slug") + + +@admin.register(ExtendedGroup) +class ExtendedGroupAdmin(BaseModelAdmin, admin.ModelAdmin): + list_display = ("account", "name", "group", "role", "identity_groups") + list_filter = ((AccountFilter), (ProjectFilter), (EnvironmentFilter), "role") + search_fields = ("name", "identity_groups") + ordering = ("group",) + formfield_overrides = { + models.JSONField: {"widget": JSONEditorWidget}, + } + search_fields = ("name",) + + +admin.site.register(Permission) diff --git a/src/core/api/app/users/apps.py b/src/core/api/app/users/apps.py new file mode 100644 index 00000000..1abd8671 --- /dev/null +++ b/src/core/api/app/users/apps.py @@ -0,0 +1,9 @@ +from django.apps import AppConfig + + +class UsersConfig(AppConfig): + default_auto_field = "django.db.models.BigAutoField" + name = "users" + + def ready(self): + from . 
import signals # noqa F401 diff --git a/src/core/api/app/users/management/__init__.py b/src/core/api/app/users/management/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/users/management/commands/__init__.py b/src/core/api/app/users/management/commands/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/users/management/commands/user_send_message.py b/src/core/api/app/users/management/commands/user_send_message.py new file mode 100644 index 00000000..01d87c96 --- /dev/null +++ b/src/core/api/app/users/management/commands/user_send_message.py @@ -0,0 +1,60 @@ +import json +import sys + +import channels.layers +from asgiref.sync import async_to_sync +from django.core.management.base import BaseCommand +from users.models.user import User + + +class Command(BaseCommand): + """Example of how to send a message to the user from the backend, such as a notification. + + Exampel: + ./manage.py user_send_message \ + --account-slug local \ + --user hey@datacoves.com \ + --content "Hello from Datacoves" + """ + + help = "Send a message to the user" + + def add_arguments(self, parser): + parser.add_argument("--account-slug", help="Account slug.", required=True) + parser.add_argument("--user-email", help="Email user.", required=True) + parser.add_argument("--title", help="Title", default="Message") + parser.add_argument("--content", help="Content.", required=True) + parser.add_argument( + "--status", help="info|success|warning|error", default="info" + ) + + def handle(self, *args, **options): + account_slug = options.get("account_slug") + user_email = options.get("user_email") + title = options.get("title") + content = options.get("content") + status = options.get("status") + + try: + user = User.objects.only("id").get(email=user_email) + + group_name = ( + f"workspace_user_account_slug_{account_slug}_user_slug_{user.slug}" + ) + channel_layer = channels.layers.get_channel_layer() + payload = {"status": status, "title": title, "content": content} + + async_to_sync(channel_layer.group_send)( + group_name, + { + "type": "user.notification", + "message_type": "user.toast", + "message": json.dumps(payload), + }, + ) + + sys.stdout.write(f"Message sent to: {group_name}\n") + + except User.DoesNotExist: + sys.stdout.write("User invalid") + return diff --git a/src/core/api/app/users/migrations/0001_initial.py b/src/core/api/app/users/migrations/0001_initial.py new file mode 100644 index 00000000..b03af8f2 --- /dev/null +++ b/src/core/api/app/users/migrations/0001_initial.py @@ -0,0 +1,73 @@ +# Generated by Django 3.2.6 on 2022-03-03 16:08 + +import autoslug.fields +from django.conf import settings +from django.db import migrations, models +import django.db.models.deletion +import uuid + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ('projects', '0001_initial'), + ('auth', '0012_alter_user_first_name_max_length'), + ] + + operations = [ + migrations.CreateModel( + name='User', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('eid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('password', models.CharField(max_length=128, verbose_name='password')), + ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')), + ('email', models.EmailField(max_length=254, unique=True, 
verbose_name='email')), + ('name', models.CharField(blank=True, max_length=130, null=True)), + ('avatar', models.CharField(blank=True, max_length=200, null=True)), + ('deactivated_at', models.DateTimeField(blank=True, null=True)), + ('is_superuser', models.BooleanField(default=False)), + ('settings', models.JSONField(blank=True, default=dict)), + ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')), + ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')), + ], + options={ + 'abstract': False, + }, + ), + migrations.CreateModel( + name='Account', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('created_at', models.DateTimeField(auto_now_add=True)), + ('updated_at', models.DateTimeField(auto_now=True)), + ('name', models.CharField(max_length=50)), + ('settings', models.JSONField(blank=True, default=dict, null=True)), + ('deactivated_at', models.DateTimeField(blank=True, null=True)), + ('subscription_id', models.CharField(blank=True, help_text='Stripe subscription id', max_length=130, null=True)), + ('subscription_payload', models.JSONField(blank=True, default=dict, null=True)), + ('last_requested_at', models.DateTimeField(blank=True, null=True)), + ('slug', autoslug.fields.AutoSlugField(editable=False, populate_from='name', unique=True)), + ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)), + ], + options={ + 'abstract': False, + }, + ), + migrations.CreateModel( + name='ExtendedGroup', + fields=[ + ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), + ('description', models.TextField(blank=True, null=True)), + ('identity_groups', models.JSONField(blank=True, default=list, null=True)), + ('role', models.CharField(blank=True, choices=[('default', 'Default'), ('account_admin', 'Account Admin'), ('project_developer', 'Project Developer'), ('project_viewer', 'Project Viewer')], max_length=30, null=True)), + ('account', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.account')), + ('group', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='extended_group', to='auth.group')), + ('project', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='projects.project')), + ], + ), + ] diff --git a/src/core/api/app/users/migrations/0002_user_is_service_account.py b/src/core/api/app/users/migrations/0002_user_is_service_account.py new file mode 100644 index 00000000..423020eb --- /dev/null +++ b/src/core/api/app/users/migrations/0002_user_is_service_account.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-03-02 19:02 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0001_initial'), + ] + + operations = [ + migrations.AddField( + model_name='user', + name='is_service_account', + field=models.BooleanField(default=False), + ), + ] diff --git a/src/core/api/app/users/migrations/0003_user_slug.py b/src/core/api/app/users/migrations/0003_user_slug.py new file mode 100644 index 00000000..58d4b8a7 
--- /dev/null +++ b/src/core/api/app/users/migrations/0003_user_slug.py @@ -0,0 +1,41 @@ +# Generated by Django 3.2.6 on 2022-09-21 15:55 + +import autoslug.fields +from django.db import migrations +import users.models + + +def set_users_slug(apps, schema_editor): + User = apps.get_model("users", "User") + for user in User.objects.all(): + user.slug = f"{user.email[:3]}-{user.id}" + user.save() + + +class Migration(migrations.Migration): + + dependencies = [ + ("users", "0002_user_is_service_account"), + ] + + operations = [ + migrations.AddField( + model_name="user", + name="slug", + field=autoslug.fields.AutoSlugField( + blank=True, + editable=False, + null=True, + populate_from=users.models.user_slug, + unique=True, + ), + ), + migrations.RunPython(set_users_slug), + migrations.AlterField( + model_name="user", + name="slug", + field=autoslug.fields.AutoSlugField( + editable=False, populate_from=users.models.user_slug, unique=True + ), + ), + ] diff --git a/src/core/api/app/users/migrations/0004_account_plan.py b/src/core/api/app/users/migrations/0004_account_plan.py new file mode 100644 index 00000000..6dec1a36 --- /dev/null +++ b/src/core/api/app/users/migrations/0004_account_plan.py @@ -0,0 +1,20 @@ +# Generated by Django 3.2.6 on 2022-10-13 18:19 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('billing', '0004_auto_20221013_1819'), + ('users', '0003_user_slug'), + ] + + operations = [ + migrations.AddField( + model_name='account', + name='plan', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='billing.plan'), + ), + ] diff --git a/src/core/api/app/users/migrations/0005_auto_20221014_1140.py b/src/core/api/app/users/migrations/0005_auto_20221014_1140.py new file mode 100644 index 00000000..ca6429f9 --- /dev/null +++ b/src/core/api/app/users/migrations/0005_auto_20221014_1140.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-10-14 11:40 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0004_account_plan'), + ] + + operations = [ + migrations.AddField( + model_name='account', + name='trial_ended_at', + field=models.DateTimeField(blank=True, null=True), + ), + migrations.AddField( + model_name='account', + name='trial_started_at', + field=models.DateTimeField(blank=True, null=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0006_auto_20221020_1351.py b/src/core/api/app/users/migrations/0006_auto_20221020_1351.py new file mode 100644 index 00000000..0eef739b --- /dev/null +++ b/src/core/api/app/users/migrations/0006_auto_20221020_1351.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.6 on 2022-10-20 13:51 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0005_auto_20221014_1140'), + ] + + operations = [ + migrations.RenameField( + model_name='account', + old_name='subscription_payload', + new_name='subscription', + ), + migrations.RenameField( + model_name='account', + old_name='last_requested_at', + new_name='subscription_retrieved_at', + ), + ] diff --git a/src/core/api/app/users/migrations/0007_auto_20221028_1456.py b/src/core/api/app/users/migrations/0007_auto_20221028_1456.py new file mode 100644 index 00000000..e221bca7 --- /dev/null +++ b/src/core/api/app/users/migrations/0007_auto_20221028_1456.py @@ -0,0 +1,27 @@ +# Generated by Django 3.2.6 on 2022-10-28 14:56 + +from django.db 
import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0006_auto_20221020_1351'), + ] + + operations = [ + migrations.RenameField( + model_name='account', + old_name='subscription_retrieved_at', + new_name='subscription_updated_at', + ), + migrations.RemoveField( + model_name='account', + name='subscription_id', + ), + migrations.AddField( + model_name='account', + name='customer_id', + field=models.CharField(blank=True, max_length=50, null=True, unique=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0008_rename_trial_ended_at_account_trial_ends_at.py b/src/core/api/app/users/migrations/0008_rename_trial_ended_at_account_trial_ends_at.py new file mode 100644 index 00000000..96ef4470 --- /dev/null +++ b/src/core/api/app/users/migrations/0008_rename_trial_ended_at_account_trial_ends_at.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-11-15 13:32 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0007_auto_20221028_1456'), + ] + + operations = [ + migrations.RenameField( + model_name='account', + old_name='trial_ended_at', + new_name='trial_ends_at', + ), + ] diff --git a/src/core/api/app/users/migrations/0009_account_workers_execution_limit.py b/src/core/api/app/users/migrations/0009_account_workers_execution_limit.py new file mode 100644 index 00000000..e06af72f --- /dev/null +++ b/src/core/api/app/users/migrations/0009_account_workers_execution_limit.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-11-15 18:49 + +import users.models.account +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0008_rename_trial_ended_at_account_trial_ends_at'), + ] + + operations = [ + migrations.AddField( + model_name='account', + name='workers_execution_limit', + field=models.JSONField(default=users.models.account.get_default_workers_execution_limit, null=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0010_account_approve_billing_events.py b/src/core/api/app/users/migrations/0010_account_approve_billing_events.py new file mode 100644 index 00000000..9fc09ad4 --- /dev/null +++ b/src/core/api/app/users/migrations/0010_account_approve_billing_events.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.6 on 2022-12-06 18:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0009_account_workers_execution_limit'), + ] + + operations = [ + migrations.AddField( + model_name='account', + name='approve_billing_events', + field=models.BooleanField(default=True, help_text='Is approval required for billing events before they are informed?'), + ), + ] diff --git a/src/core/api/app/users/migrations/0011_account_notifications_enabled.py b/src/core/api/app/users/migrations/0011_account_notifications_enabled.py new file mode 100644 index 00000000..cad28b03 --- /dev/null +++ b/src/core/api/app/users/migrations/0011_account_notifications_enabled.py @@ -0,0 +1,19 @@ +# Generated by Django 3.2.6 on 2022-12-29 20:17 + +import users.models.account +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0010_account_approve_billing_events'), + ] + + operations = [ + migrations.AddField( + model_name='account', + name='notifications_enabled', + field=models.JSONField(blank=True, default=users.models.account.get_default_notifications_enabled, null=True), + ), + ] diff --git 
a/src/core/api/app/users/migrations/0012_auto_20230206_2225.py b/src/core/api/app/users/migrations/0012_auto_20230206_2225.py new file mode 100644 index 00000000..f1d2354a --- /dev/null +++ b/src/core/api/app/users/migrations/0012_auto_20230206_2225.py @@ -0,0 +1,25 @@ +# Generated by Django 3.2.16 on 2023-02-06 22:25 + +import django.db.models.deletion +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('projects', '0073_profile_files_from'), + ('users', '0011_account_notifications_enabled'), + ] + + operations = [ + migrations.AddField( + model_name='extendedgroup', + name='environment', + field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='projects.environment'), + ), + migrations.AlterField( + model_name='extendedgroup', + name='role', + field=models.CharField(blank=True, choices=[('default', 'Default'), ('account_admin', 'Account Admin'), ('project_developer', 'Project Developer'), ('project_viewer', 'Project Viewer'), ('environment_developer', 'Environment Developer'), ('environment_viewer', 'Environment Viewer')], max_length=30, null=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0013_account_cancelled_subscription.py b/src/core/api/app/users/migrations/0013_account_cancelled_subscription.py new file mode 100644 index 00000000..0e477044 --- /dev/null +++ b/src/core/api/app/users/migrations/0013_account_cancelled_subscription.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-02-15 12:57 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0012_auto_20230206_2225'), + ] + + operations = [ + migrations.AddField( + model_name='account', + name='cancelled_subscription', + field=models.JSONField(blank=True, default=dict, null=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0014_auto_20230303_1412.py b/src/core/api/app/users/migrations/0014_auto_20230303_1412.py new file mode 100644 index 00000000..5910a1e9 --- /dev/null +++ b/src/core/api/app/users/migrations/0014_auto_20230303_1412.py @@ -0,0 +1,24 @@ +# Generated by Django 3.2.16 on 2023-03-03 14:12 + +import users.models.account +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0013_account_cancelled_subscription'), + ] + + operations = [ + migrations.AddField( + model_name='account', + name='developer_licenses', + field=models.PositiveIntegerField(default=0, help_text='Max number of developer licenses (users with access to at least one code-server pod), zero means infinite.'), + ), + migrations.AlterField( + model_name='account', + name='workers_execution_limit', + field=models.JSONField(default=users.models.account.get_default_workers_execution_limit, help_text='max execution seconds allowed per period at account level', null=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0015_alter_extendedgroup_role.py b/src/core/api/app/users/migrations/0015_alter_extendedgroup_role.py new file mode 100644 index 00000000..a4c92a35 --- /dev/null +++ b/src/core/api/app/users/migrations/0015_alter_extendedgroup_role.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.16 on 2023-04-14 20:16 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0014_auto_20230303_1412'), + ] + + operations = [ + migrations.AlterField( + model_name='extendedgroup', + name='role', + field=models.CharField(blank=True, 
choices=[('default', 'Default'), ('account_admin', 'Account Admin'), ('project_developer', 'Project Developer'), ('project_viewer', 'Project Viewer'), ('project_sysadmin', 'Project Sys Admin'), ('environment_developer', 'Environment Developer'), ('environment_viewer', 'Environment Viewer'), ('environment_sysadmin', 'Environment Sys Admin')], max_length=30, null=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0016_remove_extendedgroup_description.py b/src/core/api/app/users/migrations/0016_remove_extendedgroup_description.py new file mode 100644 index 00000000..ef59cfbf --- /dev/null +++ b/src/core/api/app/users/migrations/0016_remove_extendedgroup_description.py @@ -0,0 +1,17 @@ +# Generated by Django 3.2.16 on 2023-07-28 13:08 + +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0015_alter_extendedgroup_role'), + ] + + operations = [ + migrations.RemoveField( + model_name='extendedgroup', + name='description', + ), + ] diff --git a/src/core/api/app/users/migrations/0017_account_variant.py b/src/core/api/app/users/migrations/0017_account_variant.py new file mode 100644 index 00000000..77478d41 --- /dev/null +++ b/src/core/api/app/users/migrations/0017_account_variant.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2023-11-15 22:12 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0016_remove_extendedgroup_description'), + ] + + operations = [ + migrations.AddField( + model_name='account', + name='variant', + field=models.CharField(default='standard', max_length=32), + ), + ] diff --git a/src/core/api/app/users/migrations/0018_alter_account_slug.py b/src/core/api/app/users/migrations/0018_alter_account_slug.py new file mode 100644 index 00000000..1500d1d7 --- /dev/null +++ b/src/core/api/app/users/migrations/0018_alter_account_slug.py @@ -0,0 +1,20 @@ +# Generated by Django 3.2.20 on 2024-01-18 22:32 + +import autoslug.fields +import users.models.account +from django.db import migrations + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0017_account_variant'), + ] + + operations = [ + migrations.AlterField( + model_name='account', + name='slug', + field=autoslug.fields.AutoSlugField(editable=False, populate_from=users.models.account.account_slug, unique=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0019_extendedgroup_name.py b/src/core/api/app/users/migrations/0019_extendedgroup_name.py new file mode 100644 index 00000000..e6ced58f --- /dev/null +++ b/src/core/api/app/users/migrations/0019_extendedgroup_name.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.20 on 2024-02-05 15:32 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0018_alter_account_slug'), + ] + + operations = [ + migrations.AddField( + model_name='extendedgroup', + name='name', + field=models.CharField(blank=True, max_length=100, null=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0020_extendedgroup_set_name.py b/src/core/api/app/users/migrations/0020_extendedgroup_set_name.py new file mode 100644 index 00000000..e00f4812 --- /dev/null +++ b/src/core/api/app/users/migrations/0020_extendedgroup_set_name.py @@ -0,0 +1,31 @@ +# Generated by Django 3.2.20 on 2024-02-05 15:34 + +from django.db import migrations + + +def create_name(extended_group): + if extended_group.environment: + return f"{extended_group.environment.name} 
({extended_group.environment.slug}) {extended_group.get_role_display()}" + if extended_group.project: + return f"{extended_group.project.name} {extended_group.get_role_display()}" + if extended_group.role: + return f"{extended_group.account.name} {extended_group.get_role_display()}" + else: + return extended_group.group.name.replace(f"'{extended_group.account.slug}' ", "") + +def add_name(apps, schema_editor): + ExtendedGroup = apps.get_model('users', 'ExtendedGroup') + + for extended_group in ExtendedGroup.objects.all(): + extended_group.name = create_name(extended_group) + extended_group.save() + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0019_extendedgroup_name'), + ] + + operations = [ + migrations.RunPython(add_name) + ] diff --git a/src/core/api/app/users/migrations/0021_auto_20240701_2118.py b/src/core/api/app/users/migrations/0021_auto_20240701_2118.py new file mode 100644 index 00000000..2142e1a8 --- /dev/null +++ b/src/core/api/app/users/migrations/0021_auto_20240701_2118.py @@ -0,0 +1,28 @@ +# Generated by Django 3.2.20 on 2024-07-01 21:18 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('users', '0020_extendedgroup_set_name'), + ] + + operations = [ + migrations.AlterField( + model_name='account', + name='cancelled_subscription', + field=models.JSONField(blank=True, default=dict, help_text='The cancelled subscription object, if it has been cancelled', null=True), + ), + migrations.AlterField( + model_name='account', + name='subscription', + field=models.JSONField(blank=True, default=dict, help_text='This subscription object comes from Stripe.', null=True), + ), + migrations.AlterField( + model_name='extendedgroup', + name='identity_groups', + field=models.JSONField(blank=True, default=list, help_text='A list of groups from the external identity source which will map to this group with the given role.', null=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0022_user_setup_enabled.py b/src/core/api/app/users/migrations/0022_user_setup_enabled.py new file mode 100644 index 00000000..d5c0f713 --- /dev/null +++ b/src/core/api/app/users/migrations/0022_user_setup_enabled.py @@ -0,0 +1,18 @@ +# Generated by Django 5.0.7 on 2024-08-23 12:38 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ("users", "0021_auto_20240701_2118"), + ] + + operations = [ + migrations.AddField( + model_name="user", + name="setup_enabled", + field=models.BooleanField(null=True), + ), + ] diff --git a/src/core/api/app/users/migrations/0023_alter_extendedgroup_role.py b/src/core/api/app/users/migrations/0023_alter_extendedgroup_role.py new file mode 100644 index 00000000..df08e4c9 --- /dev/null +++ b/src/core/api/app/users/migrations/0023_alter_extendedgroup_role.py @@ -0,0 +1,33 @@ +# Generated by Django 5.0.7 on 2024-12-10 21:55 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + dependencies = [ + ("users", "0022_user_setup_enabled"), + ] + + operations = [ + migrations.AlterField( + model_name="extendedgroup", + name="role", + field=models.CharField( + blank=True, + choices=[ + ("default", "Default"), + ("account_admin", "Account Admin"), + ("project_developer", "Project Developer"), + ("project_viewer", "Project Viewer"), + ("project_sysadmin", "Project Sys Admin"), + ("project_admin", "Project Admin"), + ("environment_developer", "Environment Developer"), + ("environment_viewer", "Environment Viewer"), 
+ ("environment_sysadmin", "Environment Sys Admin"), + ("environment_admin", "Environment Admin"), + ], + max_length=30, + null=True, + ), + ), + ] diff --git a/src/core/api/app/users/migrations/__init__.py b/src/core/api/app/users/migrations/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/core/api/app/users/models/__init__.py b/src/core/api/app/users/models/__init__.py new file mode 100644 index 00000000..884dc8d5 --- /dev/null +++ b/src/core/api/app/users/models/__init__.py @@ -0,0 +1,4 @@ +from .account import * # noqa: F401,F403 +from .group import * # noqa: F401,F403 +from .permission import * # noqa: F401,F403 +from .user import * # noqa: F401,F403 diff --git a/src/core/api/app/users/models/account.py b/src/core/api/app/users/models/account.py new file mode 100644 index 00000000..c8a5bf1a --- /dev/null +++ b/src/core/api/app/users/models/account.py @@ -0,0 +1,500 @@ +from datetime import datetime, timedelta + +from autoslug import AutoSlugField +from core.mixins.models import AuditModelMixin +from core.models import DatacovesModel +from dateutil.relativedelta import relativedelta +from django.conf import settings +from django.contrib.auth.models import Group, Permission +from django.contrib.contenttypes.models import ContentType +from django.db import models +from django.db.models import Sum +from django.utils import timezone + +from .group import ExtendedGroup +from .permission import parse_permission_name +from .user import User + +MAX_SLUG_LENGTH = 30 + + +def get_default_workers_execution_limit(): + return {"airflow": 36000, "airbyte": 36000} + + +def get_default_notifications_enabled(): + return {"billing": False, "cluster": False} + + +def account_slug(account): + return account.name[:MAX_SLUG_LENGTH] + + +class AccountManager(models.Manager): + def active_accounts(self): + return self.get_queryset().filter(deactivated_at__isnull=True) + + def active_trial_accounts(self): + return self.active_accounts().filter(trial_ends_at__gt=timezone.now()) + + +class Account(AuditModelMixin, DatacovesModel): + """The billing and quota glue for the rest of the system + + Projects have accounts, and then projects have environments. Accounts + have a lot of fields regarding billing, subscriptions, and quota. + + It uses a custom manager which provides 'active_accounts()' and + 'active_trial_accounts()' which return querysets of current + active accounts or current active trial accounts respsectively. + + ======= + Methods + ======= + + - **save(...)** - Overridden to set trial_start if the plan is + set to one with a trial. + - **update_from_subscription(subscription)** - + Updates account from a Stripe subscription object + - **is_suspended(cluster)** - Returns true if the account is + suspended + - **create_permissions()** - Not usually called by users. This + creates permissions needed for the account, called by post save hook + on creation. + - **create_account_groups()** - Same as above, except for groups. + - **get_users_admin_permissions(slug)** - Static method. + Returns queryset of user admin permissions. + - **get_groups_admin_permissions(slug)** - Static method. + Returns queryset of group admin permissions + - **get_admin_permissions(slug)** - Static method. + Returns queryset of all admin permissions + - **get_admin_groups(slug)** - Static method. + Returns queryset of Groups with admin permissions + - **get_admin_users(slug)** - Static method. 
+ Returns queryset of Users that have admin permissions + - **from_permission_names(permission_names)** - Static method. + Returns queryset of Accounts that are accessible to the given list + of permission names. + """ + + name = models.CharField(max_length=50) + slug = AutoSlugField(populate_from=account_slug, unique=True) + settings = models.JSONField(default=dict, null=True, blank=True) + deactivated_at = models.DateTimeField(blank=True, null=True) + created_by = models.ForeignKey( + "User", + on_delete=models.SET_NULL, + null=True, + blank=True, + ) + # Add this after enabling "transfer ownership" feature + # owned_by = models.ForeignKey( + # "User", + # on_delete=models.SET_NULL, + # null=True, + # blank=True, + # ) + plan = models.ForeignKey( + "billing.Plan", on_delete=models.SET_NULL, null=True, blank=True + ) + # Plan variant: each plan has different amount options. Customer + # can only be subscribed to one variant. + variant = models.CharField(max_length=32, default="standard") + + customer_id = models.CharField(max_length=50, unique=True, null=True, blank=True) + + # subscription = {"id": "sub_...", "items": [subscription_item, ...], ...} + # subscription_item = {"id": "si_...", "price": "price_..."} + subscription = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="This subscription object comes from Stripe.", + ) + cancelled_subscription = models.JSONField( + default=dict, + null=True, + blank=True, + help_text="The cancelled subscription object, if it has been " "cancelled", + ) + subscription_updated_at = models.DateTimeField(null=True, blank=True) + + trial_started_at = models.DateTimeField(null=True, blank=True) + trial_ends_at = models.DateTimeField(null=True, blank=True) + + workers_execution_limit = models.JSONField( + default=get_default_workers_execution_limit, + null=True, + help_text="max execution seconds allowed per period at account level", + ) + approve_billing_events = models.BooleanField( + default=True, + help_text="Is approval required for billing events before they are informed?", + ) + notifications_enabled = models.JSONField( + default=get_default_notifications_enabled, null=True, blank=True + ) + developer_licenses = models.PositiveIntegerField( + default=0, + help_text="Max number of developer licenses (users with access to at least one " + "code-server pod), zero means infinite.", + ) + + objects = AccountManager() + + def __str__(self): + return self.slug + + def save(self, *args, **kwargs): + """Setting trial_start if plan was changed to a plan with trial""" + old_version = None + if self.pk: + old_version = Account.objects.get(id=self.pk) + if ( + self.plan + and self.plan.trial_period_days > 0 + and (not old_version or old_version.plan != self.plan) + ): + self.trial_started_at = timezone.now() + self.trial_ends_at = self.trial_started_at + timedelta( + days=self.plan.trial_period_days + ) + + return super().save(*args, **kwargs) + + def update_from_subscription(self, subscription: dict): + """Updates account from a Stripe subscription object""" + + self.plan = self.plan.__class__.objects.get(slug=subscription.metadata["plan"]) + subscription_data = subscription.to_dict_recursive() + subscription_data["items"] = [ + si.to_dict_recursive() for si in subscription["items"].auto_paging_iter() + ] + self.subscription = subscription_data + self.subscription_updated_at = timezone.now() + self.save() + + @property + def owned_by(self): + """Returns user that created the account""" + return self.created_by + + @property + def 
remaining_trial_days(self): + """Calculates and returns remaining days in free trial""" + return (self.trial_ends_at - timezone.now()).days if self.is_on_trial else -1 + + @property + def subscription_id(self): + """Returns the subscription's 'id' field if the subscription is set, + None otherwise. + """ + + if not self.subscription: + return None + return self.subscription.get("id") + + @property + def subscription_current_period_start(self): + """Returns the current period's subscription start date or + None if not subscribed + """ + + if not self.is_subscribed: + return None + return datetime.fromtimestamp( + self.subscription["current_period_start"], timezone.get_default_timezone() + ).replace(tzinfo=timezone.get_default_timezone()) + + @property + def subscription_current_period_end(self): + """Returns the current period's subscription end date or None if + not subscribed. + """ + + if not self.is_subscribed: + return None + return datetime.fromtimestamp( + self.subscription["current_period_end"], timezone.get_default_timezone() + ).replace(tzinfo=timezone.get_default_timezone()) + + @property + def cancelled_subscription_period_start(self): + """Returns the cancelled subscription start date or + None if there isn't a cancelled subscription. + """ + if not self.cancelled_subscription: + return None + return datetime.fromtimestamp( + self.cancelled_subscription["current_period_start"], + timezone.get_default_timezone(), + ).replace(tzinfo=timezone.get_default_timezone()) + + @property + def cancelled_subscription_period_end(self): + """Returns the cancelled subscription end date or + None if there isn't a cancelled subscription. + """ + + if not self.cancelled_subscription: + return None + return datetime.fromtimestamp( + self.cancelled_subscription["current_period_end"], + timezone.get_default_timezone(), + ).replace(tzinfo=timezone.get_default_timezone()) + + @property + def is_active(self) -> bool: + """Returns boolean if the account is active or not""" + + return self.deactivated_at is None + + @property + def has_environments(self) -> bool: + """Returns boolean True if the account has environments""" + + from projects.models import Environment + + return Environment.objects.filter(project__account=self).count() > 0 + + @property + def on_starter_plan(self) -> bool: + """Returns true if the account is on the starter plan""" + + return self.plan and self.plan.is_starter + + def is_suspended(self, cluster) -> bool: + """Returns true if the account is suspended""" + + if not cluster.is_feature_enabled("accounts_signup"): + return False + return not self.is_active or (not self.is_subscribed and not self.is_on_trial) + + @property + def is_on_trial(self) -> bool: + """Returns True if the account is on a trial""" + + return self.trial_ends_at and self.trial_ends_at > timezone.now() + + @property + def is_subscribed(self) -> bool: + """Returns True if the account is subscribed""" + + return self.subscription_id is not None + + @property + def current_cycle_start(self): + """Returns the start date of the current subscription or trial. + Returns None if the account is neither subscribed nor on a trial. 
+ """ + + if self.is_on_trial: + return self.trial_started_at + if self.is_subscribed: + now = timezone.now() + current_period_start = datetime.fromtimestamp( + self.subscription["current_period_start"], + timezone.get_default_timezone(), + ).replace(tzinfo=timezone.get_default_timezone()) + current_cycle_start = current_period_start.replace( + year=now.year, month=now.month + ) + if current_cycle_start > now: + current_cycle_start = current_cycle_start - relativedelta(months=1) + current_cycle_start = current_cycle_start.replace( + hour=0, minute=0, second=0, microsecond=0 + ) + return current_cycle_start + return None + + def create_permissions(self): + """Creates permissions for the account, called by a post save hook""" + + content_type = ContentType.objects.get(app_label="users", model="account") + for resource in settings.ACCOUNT_RESOURCES: + w_name = f"{self.slug}|{resource}|{settings.ACTION_WRITE}" + r_name = f"{self.slug}|{resource}|{settings.ACTION_READ}" + Permission.objects.get_or_create( + name=w_name, + content_type=content_type, + defaults={"codename": w_name[:100]}, + ) + Permission.objects.get_or_create( + name=r_name, + content_type=content_type, + defaults={"codename": r_name[:100]}, + ) + + def create_account_groups(self): + """Creates groups for the account, called by a post save hook""" + + existing = ExtendedGroup.objects.filter( + role=ExtendedGroup.Role.ROLE_DEFAULT, account=self + ).count() + if existing == 0: + # Default group + default_group, _ = Group.objects.get_or_create( + name=f"'{self.slug}' account default" + ) + ExtendedGroup.objects.create( + group=default_group, + role=ExtendedGroup.Role.ROLE_DEFAULT, + account=self, + ) + + # Admin group + group, _ = Group.objects.get_or_create(name=f"'{self.slug}' account admins") + ExtendedGroup.objects.create( + group=group, + role=ExtendedGroup.Role.ROLE_ACCOUNT_ADMIN, + account=self, + ) + for permission in self.account_level_permissions: + group.permissions.add(permission) + + @property + def account_level_permissions(self): + """Returns queryset for Permissions that belong to this account""" + # TODO: Remove "admin:" after multi-tenant grafana support + return Permission.objects.filter(name__startswith=f"{self.slug}|admin:") + + @property + def users(self): + """Returns queryset of users that have access to this account""" + + return User.objects.filter(groups__extended_group__account=self).distinct() + + @property + def developers(self): + """Returns queryset of users with code server permissions on this + account. 
+ """ + return self.users.filter( + groups__permissions__name__contains=f"|workbench:{settings.SERVICE_CODE_SERVER}", + is_superuser=False, + ).distinct() + + @property + def developers_without_license(self): + """Get the list of users that are not allowed because of developers limit configuration""" + devs = self.developers + max_devs = self.developers_limit + if max_devs and len(devs) > max_devs: + return devs.order_by("created_at")[max_devs:] + return [] + + @classmethod + def get_users_admin_permissions(cls, slug): + """Returns queryset of user admin permissions""" + + return Permission.objects.filter( + name=f"{slug}|{settings.ADMIN_USERS_RESOURCE}|{settings.ACTION_WRITE}" + ) + + @classmethod + def get_groups_admin_permissions(cls, slug): + """Returns queryset of group admin permissions""" + + return Permission.objects.filter( + name=f"{slug}|{settings.ADMIN_GROUPS_RESOURCE}|{settings.ACTION_WRITE}" + ) + + @classmethod + def get_admin_permissions(cls, slug): + """Returns queryset of all admin permissions""" + + return Permission.objects.filter( + name__in=[ + f"{slug}|{settings.ADMIN_GROUPS_RESOURCE}|{settings.ACTION_WRITE}", + f"{slug}|{settings.ADMIN_USERS_RESOURCE}|{settings.ACTION_WRITE}", + ] + ) + + @classmethod + def get_admin_groups(cls, slug): + """Returns queryset of Groups with admin permissions""" + + return Group.objects.filter( + permissions__in=cls.get_admin_permissions(slug) + ).distinct() + + @classmethod + def get_admin_users(cls, slug): + """Returns queryset of Users that have admin permissions""" + + return User.objects.filter(groups__in=cls.get_admin_groups(slug)) + + @staticmethod + def from_permission_names(permission_names): + """Returns queryset of Accounts that are accessible to the given list + of permission names. 
+ """ + + if not permission_names: + return [] + account_slugs = set() + for name in permission_names: + permission_data = parse_permission_name(name) + account_slugs.add(permission_data.get("account_slug")) + return Account.objects.filter(slug__in=account_slugs) + + @property + def workers_execution_limit_per_period(self): + """Get execution seconds from account if is configured or from plan if execution seconds are missing + in the account""" + return self.workers_execution_limit or ( + self.plan and self.plan.workers_execution_limit + ) + + @property + def developers_limit(self): + """Max number of developers allowed""" + return self.developer_licenses or (self.plan and self.plan.developer_licenses) + + @property + def airflow_workers_seconds_sum(self): + """Given an account, and sum all tally markers for airflow in the current period, + finally return the total""" + from billing.models import TallyMark + + first_datetime = self.current_cycle_start + if not first_datetime: + return 0 + last_datetime = first_datetime + relativedelta(months=1) - datetime.resolution + tally_mark_aggregation = TallyMark.objects.filter( + tally__account=self, + tally__name="airflow_workers_daily_running_time_seconds", + time__gte=first_datetime, + time__lte=last_datetime, + ).aggregate(total=Sum("amount")) + total = tally_mark_aggregation.get("total") or 0 + return total + + @property + def airbyte_workers_seconds_sum(self): + """Given an account, and sum all tally markers for airbyte in the current period, + finally return the total""" + from billing.models import TallyMark + + first_datetime = self.current_cycle_start + if not first_datetime: + return 0 + last_datetime = first_datetime + relativedelta(months=1) - datetime.resolution + tally_mark_aggregation = TallyMark.objects.filter( + tally__account=self, + tally__name="airbyte_workers_daily_running_time_seconds", + time__gte=first_datetime, + time__lte=last_datetime, + ).aggregate(total=Sum("amount")) + total = tally_mark_aggregation.get("total") or 0 + return total + + @property + def airflow_workers_minutes_sum(self): + """Calculates airflow worker utilization in minutes and returns it""" + return self.airflow_workers_seconds_sum // 60 + + @property + def airbyte_workers_minutes_sum(self): + """Calculates airbyte worker utilization in minutes and returns it""" + return self.airbyte_workers_seconds_sum // 60 diff --git a/src/core/api/app/users/models/group.py b/src/core/api/app/users/models/group.py new file mode 100644 index 00000000..090c10e2 --- /dev/null +++ b/src/core/api/app/users/models/group.py @@ -0,0 +1,126 @@ +from core.models import DatacovesModel +from django.contrib.auth.models import Group +from django.db import models +from users.models.permission import parse_permission_name + + +class ExtendedGroup(DatacovesModel): + """Extended groups map identity groups to groups + + This is a way we can support Active Directory or other SSO systems. + It maps identity groups from such systems to Django authentication + groups + + ======= + Methods + ======= + + - **create_name()** - Generate a name for this extended group + - **save(...)** - Runs create_name to generate a name if the 'name' + field is left unset. 
+ """ + + class Role(models.TextChoices): + ROLE_DEFAULT = "default", "Default" + ROLE_ACCOUNT_ADMIN = "account_admin", "Account Admin" + ROLE_PROJECT_DEVELOPER = "project_developer", "Project Developer" + ROLE_PROJECT_VIEWER = "project_viewer", "Project Viewer" + ROLE_PROJECT_SYSADMIN = "project_sysadmin", "Project Sys Admin" + ROLE_PROJECT_ADMIN = "project_admin", "Project Admin" + ROLE_ENVIRONMENT_DEVELOPER = "environment_developer", "Environment Developer" + ROLE_ENVIRONMENT_VIEWER = ( + "environment_viewer", + "Environment Viewer", + ) + ROLE_ENVIRONMENT_SYSADMIN = "environment_sysadmin", "Environment Sys Admin" + ROLE_ENVIRONMENT_ADMIN = "environment_admin", "Environment Admin" + + group = models.OneToOneField( + Group, on_delete=models.CASCADE, related_name="extended_group" + ) + account = models.ForeignKey("Account", on_delete=models.CASCADE) + identity_groups = models.JSONField( + null=True, + blank=True, + default=list, + help_text="A list of groups from the external identity source which " + "will map to this group with the given role.", + ) + project = models.ForeignKey( + "projects.Project", on_delete=models.CASCADE, null=True, blank=True + ) + environment = models.ForeignKey( + "projects.Environment", on_delete=models.CASCADE, null=True, blank=True + ) + role = models.CharField(max_length=30, choices=Role, null=True, blank=True) + name = models.CharField(max_length=100, null=True, blank=True) + + def __str__(self): + return self.name + + def save(self, *args, **kwargs): + """Generate a name if name is not set""" + + if not self.name: + self.name = self.create_name() + super().save(*args, **kwargs) + + def _parse_resource_name(self, resource: str = None) -> str: + """ + Parse the service:resource name to get the resource name + """ + if resource is None: + return "" + + # Garantizar que solo se procesen nombres válidos + parts = resource.split(":") + if len(parts) > 1: + return " ".join(parts[1:]) + return "" + + def create_name(self): + """Generate a name for this extended group""" + + if self.environment: + return f"{self.environment.name} ({self.environment.slug}) {self.get_role_display()}" + if self.project: + return f"{self.project.name} {self.get_role_display()}" + if self.role: + return f"{self.account.name} {self.get_role_display()}" + else: + return self.group.name.replace(f"'{self.account.slug}' ", "") + + @property + def description(self): + """Generate a text-based description of this extended group""" + + # , + permission_names = list( + self.group.permissions.all().values_list("name", flat=True) + ) + + # {'resource': 'workbench:airbyte', 'environment_slug': None, 'project_slug': 'analytics-local'} + parsed_permissions = map( + lambda name: parse_permission_name(name), permission_names + ) + + description_lines = set() + template = "Users in {environment_slug} environment of {project_slug} project with access to {resources}" + for permission in parsed_permissions: + project = permission.get("project_slug") + environment = permission.get("environment_slug") + resources = set( + self._parse_resource_name(p.get("resource")) + for p in parsed_permissions + if p.get("project_slug") == project + and p.get("environment_slug") == environment + ) + description = template.format( + environment_slug=f"{environment if environment is not None else 'any'}", + project_slug=f"{project if project is not None else 'any'}", + resources=", ".join(resources), + ) + + description_lines.add(description) + + return "\n".join(description_lines) diff --git 
a/src/core/api/app/users/models/permission.py b/src/core/api/app/users/models/permission.py new file mode 100644 index 00000000..fc465b70 --- /dev/null +++ b/src/core/api/app/users/models/permission.py @@ -0,0 +1,44 @@ +from django.contrib.auth.models import Permission + + +def make_permission_name( + resource, + action, + scope=None, + account_slug=None, + project_slug=None, + environment_slug=None, +): + if scope is None: + if account_slug is None and project_slug is None and environment_slug is None: + return f"{resource}|{action}" + scope = [] + if account_slug: + scope.append(account_slug) + if project_slug: + scope.append(project_slug) + if environment_slug: + scope.append(environment_slug) + scope = ":".join(scope) + return f"{scope}|{resource}|{action}" + + +def parse_permission_name(name): + if isinstance(name, Permission): + name = name.name + try: + scope, resource, action = name.split("|") + except ValueError: + raise Exception(f"'{name}' is not a valid permission name") + scope_parts = scope.split(":") + account = scope_parts[0] if len(scope_parts) > 0 else None + project = scope_parts[1] if len(scope_parts) > 1 else None + environment = scope_parts[2] if len(scope_parts) > 2 else None + return { + "scope": scope, + "resource": resource, + "action": action, + "environment_slug": environment, + "project_slug": project, + "account_slug": account, + } diff --git a/src/core/api/app/users/models/user.py b/src/core/api/app/users/models/user.py new file mode 100644 index 00000000..da48d9cf --- /dev/null +++ b/src/core/api/app/users/models/user.py @@ -0,0 +1,344 @@ +from autoslug import AutoSlugField +from core.mixins.models import AuditModelMixin, EidModelMixin +from core.models import DatacovesModel +from django.conf import settings +from django.contrib.auth.models import ( + AbstractBaseUser, + BaseUserManager, + Permission, + PermissionsMixin, +) +from django.db import models +from django.db.models import F, Max, Q +from projects import models as projects_models + +from .permission import parse_permission_name + + +class UserManager(BaseUserManager): + def create_user(self, email, password=None, **extra_field): + if not email: + raise ValueError("Users must have an email address") + + user = self.model(email=self.normalize_email(email), **extra_field) + + user.set_password(password) + user.save(using=self._db) + return user + + def create_superuser(self, email, password, name=None): + user = self.create_user(email=email, password=password, name=name) + user.is_superuser = True + user.save(using=self._db) + return user + + def get_or_create(self, defaults=None, **kwargs): + email = kwargs.pop("email") + if not email: + raise ValueError("Users must have an email address") + + try: + return self.get(email__iexact=self.normalize_email(email), **kwargs), False + except self.model.DoesNotExist: + return self.create_user(email=email, **defaults), True + + +def user_slug(user): + return user.email.split("@")[0][:5] + + +class User( + EidModelMixin, AuditModelMixin, PermissionsMixin, AbstractBaseUser, DatacovesModel +): + """A user in our system, wrapping a Django base user + + This has a custom UserManager which provides a few methods: + **create_user(email, password=None, ...)** which creates a new user, + passing additional kwargs to this model object if provided. It + will normalize the email. 
+ + **create_superuser(email, password)** Creates a super user + + **get_or_create(defaults=None, ...)** Must be passed an 'email' kw + arg, and either returns an existing user with that email or runs + create_user to make a new one. 'defaults' will be passed to + create_user as kwargs. Additional kwargs will be passed to the + query which looks up the user by email. + + ======= + Methods + ======= + + - **bulk_permission_names(users, filter=None)** - Static method. + This is a static method to fetch bulk user permissions as is + useful for doing mass actions. Taking a group of users, it will + return a dictionary mapping user ID's to each's list of unique + permissions. Filter, if provided, is passed in to filter the results. + The intention is to pass in a set of Q-style query filters + - **get_bulk_environment_permission_names(users, environment)** - + Static method. Like get_environment_permissions, but designed to work + with a list or queryset of users rather than one a time. This is more + efficient for bulk actions. It will return a dictionary mapping the + user to lists of Permission objects. + - **get_account_permissions(account_slug)** - Get queryset of permissions + the user has for the given account slug + - **is_account_admin(account_slug)** - Returns if the user has admin + permissions for the given account slug + - **get_environment_permission_names(environment)** - + Returns a list of environment permissions the user has for the + given environment + - **project_and_env_slugs(filter="|workbench:")** - + Returns the projects and environments slugs a user has access to + - **service_resource_permissions(service, env=None)** - + Returns the user's allowed actions on services resources for a + specific service return example: ``['*|read', 'security|write']`` + - **get_repositories_tested(repository)** - + Return a queryset of repositories that are validated + - **is_repository_tested(repository)** - + Checks to see if a given repository has been validated + """ + + email = models.EmailField("email", unique=True) + name = models.CharField(max_length=130, null=True, blank=True) + avatar = models.CharField(max_length=200, null=True, blank=True) + deactivated_at = models.DateTimeField(blank=True, null=True) + is_superuser = models.BooleanField(default=False) + is_service_account = models.BooleanField(default=False) + settings = models.JSONField(default=dict, blank=True) + slug = AutoSlugField(populate_from=user_slug, unique=True) + setup_enabled = models.BooleanField(null=True) + + USERNAME_FIELD = "email" + REQUIRED_FIELDS = ["name"] + + objects = UserManager() + + def __str__(self): + return self.email + + @property + def is_staff(self): + """ + Only super users can login to the django admin + """ + return self.is_superuser + + @property + def email_username(self): + """Returns just the username portion of the email""" + return self.email.split("@")[0].lower() + + @property + def first_name(self): + """Returns the first word in the user's name field""" + words = self.name.split() + return words[0] if words else self.name + + @property + def last_name(self): + """Returns all words except the first word in the user's name field""" + words = self.name.split() + return " ".join(words[1:]) if words else self.name + + @property + def accounts(self): + """Returns queryset of accounts this user has access to""" + from .account import Account + + return Account.objects.filter( + deactivated_at__isnull=True, extendedgroup__group__in=self.groups.all() + ).distinct() + + @property + def 
permissions(self): + """Returns queryset of permissions associated with this user""" + + return Permission.objects.filter( + Q(user__in=[self]) | Q(group__in=self.groups.all()), + ).distinct() + + @property + def permissions_names(self): + """Returns queryset of permissions associated with this user""" + + return ( + Permission.objects.filter( + Q(user__in=[self]) | Q(group__in=self.groups.all()), + ) + .distinct() + .values_list("name", flat=True) + ) + + @staticmethod + def bulk_permission_names(users, filter=None) -> dict: + """This is a static method to fetch bulk user permissions as is + useful for doing mass actions. + + Taking a group of users, it will return a dictionary mapping user ID's + to each's list of unique permissions. + + Filter, if provided, is passed in to filter the results. The + intention is to pass in a set of Q-style query filters. + """ + + query = ( + Permission.objects.filter(group__user__in=users) + .annotate(user_id=F("group__user__id")) + .values("id", "user_id") + .annotate(name=Max("name")) + .order_by() + ) # clear default ordering + + if filter is not None: + query = query.filter(filter) + + ret = {} + + for x in query: + if x["user_id"] not in ret: + ret[x["user_id"]] = [] + + ret[x["user_id"]].append(x["name"]) + + return ret + + @staticmethod + def get_bulk_environment_permission_names(users, environment) -> dict: + """Like get_environment_permissions, but designed to work with a list + or queryset of users rather than one a time. This is more efficient + for bulk actions. It will return a dictionary mapping the user to + lists of Permission objects. + """ + + return User.bulk_permission_names( + users, + Q(group__extended_group__account__slug=environment.project.account.slug) + & ( + Q(name__icontains=f":{environment.project.slug}|") + | Q(name__icontains=f":{environment.slug}|") + ), + ) + + def get_account_permissions(self, account_slug): + """Get queryset of permissions the user has for the given account slug""" + return self.permissions.filter( + group__extended_group__account__slug=account_slug, + ) + + def is_account_admin(self, account_slug): + """Returns if the user has admin permissions for the given account slug""" + + return ( + self.get_account_permissions(account_slug) + .filter(name__icontains="|admin:") + .count() + > 0 + ) + + def get_environment_permission_names(self, environment): + """Returns a list of environment permissions the user has for the + given environment""" + + return ( + self.get_account_permissions(environment.project.account.slug) + .filter( + Q(name__icontains=f":{environment.project.slug}|") + | Q(name__icontains=f":{environment.slug}|") + ) + .values_list("name", flat=True) + ) + + def project_and_env_slugs(self, filter="|workbench:"): + """Returns the projects and environments slugs a user has access to""" + project_slugs = set() + env_slugs = set() + for permission in self.permissions.filter(name__icontains=filter): + name = parse_permission_name(permission) + project_slug = name.get("project_slug") + env_slug = name.get("environment_slug") + if env_slug: + env_slugs.add(env_slug) + elif project_slug: + project_slugs.add(project_slug) + return project_slugs, env_slugs + + @property + def projects(self): + """Returns a queryset of projects the user has access to""" + + project_slugs, env_slugs = self.project_and_env_slugs() + return projects_models.Project.objects.filter( + Q(slug__in=project_slugs) | Q(environments__slug__in=env_slugs) + ).distinct() + + @property + def environments(self): + """Returns a queryset 
of environments the user has access to""" + + project_slugs, env_slugs = self.project_and_env_slugs() + return projects_models.Environment.objects.filter( + Q(slug__in=env_slugs) | Q(project__slug__in=project_slugs) + ) + + def service_resource_permissions(self, service, env=None): + """ + Returns the user's allowed actions on services resources for a specific service + return example: ['*|read', 'security|write'] + """ + if env: + project_slug = env.project.slug + env_slug = env.slug + permissions = self.permissions.filter( + Q(name__icontains=f"{project_slug}|workbench:{service}") + | Q(name__icontains=f"{env_slug}|workbench:{service}") + ) + else: + permissions = self.permissions.filter( + name__icontains=f"|services:{service}" + ) + res_permissions = [] + for permission in permissions: + name = parse_permission_name(permission) + action = name["action"] + resource = name["resource"].split(":")[2:] + res_permissions.append(f"{resource[0] if resource else '*'}|{action}") + return set(res_permissions) + + @property + def trial_accounts(self): + """Returns a count of trial accounts associated with the user""" + from .account import Account + + return Account.objects.filter( + created_by=self, + trial_started_at__isnull=False, + ).count() + + def get_repositories_tested(self, repository): + """Return a queryset of repositories that are validated""" + return self.repositories.select_related("repository").filter( + repository=repository, validated_at__isnull=False + ) + + def is_repository_tested(self, repository) -> bool: + """Checks to see if a given repository has been validated""" + return self.get_repositories_tested(repository).exists() + + def can_use_local_airflow(self, env) -> bool: + """Returns True if the user is allowed to use local airflow. + Doesn't check if local_airflow feature is enabled or not. 
+ """ + + for permission in self.service_resource_permissions("airflow", env): + if "admin" in permission: + return True + + return False + + def save(self, *args, **kwargs): + """ + Auto-enable account setup for superuser/localhost users + """ + if self.is_superuser or settings.DEBUG: + self.setup_enabled = True + super().save(*args, **kwargs) diff --git a/src/core/api/app/users/serializers.py b/src/core/api/app/users/serializers.py new file mode 100644 index 00000000..aefd068b --- /dev/null +++ b/src/core/api/app/users/serializers.py @@ -0,0 +1,214 @@ +from billing.serializers import PlanSerializer +from clusters.request_utils import get_cluster +from django.conf import settings +from django.db import transaction +from rest_framework import serializers +from rest_framework.exceptions import ValidationError +from users.models import Account + +from .models import ExtendedGroup, Group, User + + +class AccountSerializer(serializers.ModelSerializer): + owned_by = serializers.SerializerMethodField() + plan = PlanSerializer(required=False) + is_suspended = serializers.SerializerMethodField() + + class Meta: + model = Account + fields = ( + "name", + "slug", + "plan", + "owned_by", + "subscription_id", + "trial_ends_at", + "remaining_trial_days", + "has_environments", + "is_suspended", + ) + read_only_fields = ( + "slug", + "plan", + "owned_by", + "subscription_id", + "trial_ends_at", + ) + + def get_owned_by(self, obj): + return obj.owned_by.email if obj.owned_by else None + + def get_subscription_id(self, obj): + return obj.subscription_id + + def get_is_suspended(self, obj): + cluster = get_cluster(self.context["request"]) + return obj.is_suspended(cluster) + + +class UserInfoSerializer(serializers.ModelSerializer): + """This serializer is called in three different contexts: + - on account setup: no account_slug nor env_slug are specified + - on launchpad: account_slug is specified + - on workbench: env_slug is specified + """ + + permissions = serializers.SerializerMethodField() + projects = serializers.SerializerMethodField() + features = serializers.SerializerMethodField() + release = serializers.SerializerMethodField() + customer_portal = serializers.SerializerMethodField() + has_license = serializers.SerializerMethodField() + # These two fields are returned only if env_slug is passed by queryparam + user_environments = serializers.SerializerMethodField() + env_account = serializers.SerializerMethodField() + has_dynamic_blob_storage_provisioning = serializers.SerializerMethodField() + has_dynamic_network_filesystem_provisioning = serializers.SerializerMethodField() + + class Meta: + model = User + fields = ( + "name", + "email", + "email_username", + "slug", + "avatar", + "permissions", + "projects", + "trial_accounts", + "features", + "user_environments", + "release", + "customer_portal", + "has_license", + "env_account", + "has_dynamic_blob_storage_provisioning", + "has_dynamic_network_filesystem_provisioning", + "setup_enabled", + ) + + def _get_account(self, user): + account = None + env_slug = self.context["environment"] + account_slug = self.context["account"] + if env_slug: + account = user.accounts.filter( + projects__environments__slug=env_slug + ).first() + elif account_slug: + account = user.accounts.filter(slug=account_slug).first() + return account + + def get_permissions(self, obj): + account = self._get_account(obj) + if not account: + return [] + if account.is_suspended(get_cluster(self.context["request"])): + return [] + permissions = 
obj.get_account_permissions(account.slug) + if account.is_on_trial: + for resource in settings.IAM_RESOURCES: + permissions = permissions.exclude(name__icontains="|" + resource) + return permissions.values_list("name", flat=True) + + def get_projects(self, obj): + from projects.serializers import ProjectSerializer + + account = self._get_account(obj) + if not account: + return [] + if account.is_suspended(get_cluster(self.context["request"])): + return [] + return ProjectSerializer( + obj.projects.filter(account=account), many=True, context=self.context + ).data + + def get_features(self, obj): + return get_cluster(self.context["request"]).all_features + + def get_release(self, obj): + return get_cluster(self.context["request"]).release.name + + def get_user_environments(self, obj): + from projects.serializers import UserEnvironmentSerializer + + return UserEnvironmentSerializer(obj.user_environments, many=True).data + + def get_env_account(self, obj): + env_slug = self.context["environment"] + if env_slug: + account = obj.accounts.filter(projects__environments__slug=env_slug).first() + if account: + return account.slug + return None + + def get_customer_portal(self, obj): + return settings.STRIPE_CUSTOMER_PORTAL + + def get_has_license(self, obj): + account = self._get_account(obj) + if not account: + return True + return obj not in account.developers_without_license + + def get_has_dynamic_blob_storage_provisioning(self, obj): + return get_cluster( + self.context["request"] + ).has_dynamic_blob_storage_provisioning() + + def get_has_dynamic_network_filesystem_provisioning(self, obj): + return get_cluster( + self.context["request"] + ).has_dynamic_network_filesystem_provisioning() + + +class UserSerializer(serializers.ModelSerializer): + class Meta: + model = User + fields = ("name", "email", "groups", "id", "last_login") + + def update(self, instance, validated_data): + """Checks if this is the last account admin""" + account_slug = self.context["account"] + + admin_users = list(Account.get_admin_users(account_slug)) + if instance in admin_users and len(admin_users) == 1: + # Check if user becomes non admin + groups = [group.id for group in validated_data["groups"]] + is_users_admin = Group.objects.filter( + id__in=groups, + permissions__in=Account.get_users_admin_permissions(account_slug), + ).count() + is_groups_admin = Group.objects.filter( + id__in=groups, + permissions__in=Account.get_groups_admin_permissions(account_slug), + ).count() + if not is_users_admin or not is_groups_admin: + raise ValidationError( + "You need to keep at least one admin in the account." 
+ ) + return super().update(instance, validated_data) + + def create(self, validated_data): + """Adding user to default account group to explicitly make him belong to it""" + # FIXME: This method is not used right now, users are created through + # invitations, keeping if needed for on-prem + with transaction.atomic(): + instance = super().create(validated_data) + account = self.context["account"] + account_group = Group.objects.get( + extended_group__role=ExtendedGroup.Role.ROLE_DEFAULT, + extended_group__account__slug=account, + ) + instance.groups.add(account_group) + + def to_representation(self, instance): + """Returning group names to avoid an extra request on clients""" + data = super().to_representation(instance) + data["groups"] = [ + {"id": group.id, "name": group.extended_group.name} + for group in instance.groups.exclude( + extended_group__role=ExtendedGroup.Role.ROLE_DEFAULT + ) + ] + return data diff --git a/src/core/api/app/users/signals.py b/src/core/api/app/users/signals.py new file mode 100644 index 00000000..0e9d7e14 --- /dev/null +++ b/src/core/api/app/users/signals.py @@ -0,0 +1,22 @@ +from django.db.models.signals import post_delete, post_save +from django.dispatch import receiver + +from .models import Account, ExtendedGroup + + +@receiver(post_save, sender=Account, dispatch_uid="users.handle_account_post_save") +def handle_account_post_save(sender, **kwargs): + if kwargs["created"]: + account = kwargs["instance"] + account.create_permissions() + account.create_account_groups() + + +@receiver( + post_delete, + sender=ExtendedGroup, + dispatch_uid="users.handle_extended_group_post_delete", +) +def handle_extended_group_post_delete(sender, **kwargs): + exgroup = kwargs["instance"] + exgroup.group.delete() diff --git a/src/core/api/app/users/templates/setup_requests/accepted/setup_request_accepted_message.html b/src/core/api/app/users/templates/setup_requests/accepted/setup_request_accepted_message.html new file mode 100644 index 00000000..af2d2fd3 --- /dev/null +++ b/src/core/api/app/users/templates/setup_requests/accepted/setup_request_accepted_message.html @@ -0,0 +1,421 @@ +{% load i18n %} {% autoescape off %} {% blocktrans %} + + + + + + + + + + This is preheader text. Some clients will show this text as a + preview. 
+ + + + + + + + + +{% endblocktrans %} {% endautoescape %} diff --git a/src/core/api/app/users/templates/setup_requests/accepted/setup_request_accepted_message.txt b/src/core/api/app/users/templates/setup_requests/accepted/setup_request_accepted_message.txt new file mode 100644 index 00000000..83e91fb3 --- /dev/null +++ b/src/core/api/app/users/templates/setup_requests/accepted/setup_request_accepted_message.txt @@ -0,0 +1,11 @@ +{% load i18n %} +{% autoescape off %} +{% blocktrans %} + +Hello {{user_name}} +Datacoves Support has accepted your trial request + +Accept: {{login_url}} + +{% endblocktrans %} +{% endautoescape %} \ No newline at end of file diff --git a/src/core/api/app/users/templates/setup_requests/accepted/setup_request_accepted_subject.txt b/src/core/api/app/users/templates/setup_requests/accepted/setup_request_accepted_subject.txt new file mode 100644 index 00000000..66dd86cb --- /dev/null +++ b/src/core/api/app/users/templates/setup_requests/accepted/setup_request_accepted_subject.txt @@ -0,0 +1,4 @@ +{% load i18n %} +{% autoescape off %} +{% blocktrans %}Datacoves Support accepted your trial request.{% endblocktrans %} +{% endautoescape %} \ No newline at end of file diff --git a/src/core/api/app/users/templates/setup_requests/email/setup_request_message.html b/src/core/api/app/users/templates/setup_requests/email/setup_request_message.html new file mode 100644 index 00000000..adda73d8 --- /dev/null +++ b/src/core/api/app/users/templates/setup_requests/email/setup_request_message.html @@ -0,0 +1,456 @@ +{% load i18n %} {% autoescape off %} {% blocktrans %} + + + + + + + + + + This is preheader text. Some clients will show this text as a + preview. + + + + + + + + + +{% endblocktrans %} {% endautoescape %} diff --git a/src/core/api/app/users/templates/setup_requests/email/setup_request_message.txt b/src/core/api/app/users/templates/setup_requests/email/setup_request_message.txt new file mode 100644 index 00000000..e3bb75df --- /dev/null +++ b/src/core/api/app/users/templates/setup_requests/email/setup_request_message.txt @@ -0,0 +1,12 @@ +{% load i18n %} +{% autoescape off %} +{% blocktrans %} + + +{{user_name}} ({{user_email}}) is trying to create an Account + +Accept: {{accept_url}} +Reject: {{reject_url}} + +{% endblocktrans %} +{% endautoescape %} \ No newline at end of file diff --git a/src/core/api/app/users/templates/setup_requests/email/setup_request_subject.txt b/src/core/api/app/users/templates/setup_requests/email/setup_request_subject.txt new file mode 100644 index 00000000..3f457368 --- /dev/null +++ b/src/core/api/app/users/templates/setup_requests/email/setup_request_subject.txt @@ -0,0 +1,4 @@ +{% load i18n %} +{% autoescape off %} +{% blocktrans %}{{user_name}} ({{user_email}}) is trying to create an Account{% endblocktrans %} +{% endautoescape %} \ No newline at end of file diff --git a/src/core/api/app/users/tests.py b/src/core/api/app/users/tests.py new file mode 100644 index 00000000..1101678c --- /dev/null +++ b/src/core/api/app/users/tests.py @@ -0,0 +1,19 @@ +from django.test import TestCase +from factories import AccountFactory + +from .models import MAX_SLUG_LENGTH + + +class AccountsTests(TestCase): + """ + Test Accounts different scenarios: + """ + + def test_account_slug_truncates(self) -> None: + """ + Account slug should be truncated to MAX_SLUG_LENGTH + long_name is created with length > MAX_SLUG_LENGTH + """ + long_name = "test" * 8 + account = AccountFactory.create(name=long_name) + self.assertEqual(len(account.slug), MAX_SLUG_LENGTH) 
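Reviewer note: the users app above appears to key all authorization on permission names of the form `<account>:<project>:<environment>|<resource>|<action>`. Below is a minimal sketch (not part of the diff) of how the helpers added in `users/models/permission.py` compose and parse such a name; the slugs used are hypothetical placeholders, and the snippet assumes it runs inside the Django project so the module is importable.

```python
# Illustrative sketch only -- not part of this diff.
# Assumes the Django app context so users.models.permission is importable;
# "acme", "analytics" and "dev123" are hypothetical slugs.
from users.models.permission import make_permission_name, parse_permission_name

# The scope prefix is "<account>:<project>:<environment>", joined with ":".
name = make_permission_name(
    resource="workbench:airflow",
    action="write",
    account_slug="acme",        # hypothetical account slug
    project_slug="analytics",   # hypothetical project slug
    environment_slug="dev123",  # hypothetical environment slug
)
assert name == "acme:analytics:dev123|workbench:airflow|write"

parsed = parse_permission_name(name)
assert parsed["account_slug"] == "acme"
assert parsed["project_slug"] == "analytics"
assert parsed["environment_slug"] == "dev123"
assert parsed["resource"] == "workbench:airflow"
assert parsed["action"] == "write"
```

Account-scoped names, such as those created by `Account.create_permissions()`, carry only the account slug in the scope, so `parse_permission_name` returns `None` for the missing project and environment parts.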
diff --git a/src/core/api/app/users/views.py b/src/core/api/app/users/views.py new file mode 100644 index 00000000..c63cd434 --- /dev/null +++ b/src/core/api/app/users/views.py @@ -0,0 +1,69 @@ +from django.conf import settings +from django.shortcuts import redirect +from django.urls import reverse +from invitations.services import EmailSender +from rest_framework import views +from rest_framework.permissions import IsAdminUser, IsAuthenticated +from rest_framework.response import Response +from rest_framework.status import HTTP_200_OK, HTTP_404_NOT_FOUND + +from .models import User + + +class NotifySetupRequest(views.APIView): + REQUEST_RECEIVER = settings.SETUP_REQUESTS_RECEIVER + permission_classes = [IsAuthenticated] + + def post(self, request): + """Notify the support team about a new setup request""" + user: User = request.user + accept_url = reverse("accept-setup", kwargs={"pk": user.pk}) + accept_url = request.build_absolute_uri(accept_url) + reject_url = reverse("reject-setup", kwargs={"pk": user.pk}) + reject_url = request.build_absolute_uri(reject_url) + ctx = { + "accept_url": accept_url, + "reject_url": reject_url, + "user_email": user.email, + "user_name": user.name, + } + + email_template = "setup_requests/email/setup_request" + EmailSender.send_mail(email_template, self.REQUEST_RECEIVER, ctx) + user.setup_enabled = False + user.save() + return Response(status=HTTP_200_OK) + + +class AcceptSetupRequest(views.APIView): + permission_classes = [IsAdminUser] + + def get(self, request, pk): + """Accept a setup request""" + try: + user = User.objects.get(pk=pk) + user.setup_enabled = True + login_url = reverse("login") + login_url = request.build_absolute_uri(login_url) + ctx = {"user_name": user.name, "login_url": login_url} + email_template = "setup_requests/accepted/setup_request_accepted" + EmailSender.send_mail(email_template, user.email, ctx) + user.save() + return redirect(settings.INVITATION_SUCCESS_REDIRECT) + except User.DoesNotExist: + return Response(status=HTTP_404_NOT_FOUND) + + +class RejectSetupRequest(views.APIView): + permission_classes = [IsAdminUser] + + def get(self, request, pk): + """Reject a setup request""" + + try: + user = User.objects.get(pk=pk) + user.setup_enabled = False + user.save() + return redirect(settings.INVITATION_SUCCESS_REDIRECT) + except User.DoesNotExist: + return Response(status=HTTP_404_NOT_FOUND) diff --git a/src/core/api/requirements-bigquery.txt b/src/core/api/requirements-bigquery.txt new file mode 100644 index 00000000..57fc0696 --- /dev/null +++ b/src/core/api/requirements-bigquery.txt @@ -0,0 +1 @@ +google-cloud-bigquery==3.23.1 \ No newline at end of file diff --git a/src/core/api/requirements-databricks.txt b/src/core/api/requirements-databricks.txt new file mode 100644 index 00000000..0bdedada --- /dev/null +++ b/src/core/api/requirements-databricks.txt @@ -0,0 +1 @@ +databricks-sql-connector==3.1.2 \ No newline at end of file diff --git a/src/core/api/requirements-redshift.txt b/src/core/api/requirements-redshift.txt new file mode 100644 index 00000000..5fa7fc74 --- /dev/null +++ b/src/core/api/requirements-redshift.txt @@ -0,0 +1,3 @@ +redshift-connector==2.1.1 +# pip-audit: subdependency explicitly fixed +requests==2.31.0 diff --git a/src/core/api/requirements-snowflake.txt b/src/core/api/requirements-snowflake.txt new file mode 100644 index 00000000..a3ebd18b --- /dev/null +++ b/src/core/api/requirements-snowflake.txt @@ -0,0 +1,4 @@ +urllib3==1.26.20 +cryptography==41.0.4 +snowflake-connector-python==3.15.0 
+certifi==2025.1.31 diff --git a/src/core/api/requirements.txt b/src/core/api/requirements.txt new file mode 100644 index 00000000..8f53d39c --- /dev/null +++ b/src/core/api/requirements.txt @@ -0,0 +1,61 @@ +boto3==1.24.2 +celery==5.4.0 +channels==4.1.0 +channels-redis==4.2.0 +# FIXME: Downgraded since core's OIDC server started failing: on token exchange +# failed: oauth2: cannot fetch token: 400 Bad Request Response: {"error": "invalid_grant"} +#cryptography==41.0.4 +cryptography==3.4.8 +daphne==4.1.2 +Django==5.0.7 +djangorestframework==3.15.2 +djangorestframework-recursive==0.1.2 +djangorestframework-simplejwt==5.3.1 +django-rest-knox==5.0.2 +# django-admin-list-filter-dropdown==1.0.3 +django-admin-csvexport==2.3 +django-autoslug==1.9.8 +django-celery-results==2.5.1 +django-celery-beat==2.6.0 +django-cors-headers==4.4.0 +django-debug-toolbar==4.4.6 +django_extensions==3.2.3 +django-filter==24.2 +django-grappelli==3.0.8 +django-health-check==3.18.3 +django-json-widget==1.1.1 +django-redis==5.4.0 +django-object-actions==3.0.2 +django-oauth-toolkit==2.3.0 +django-prometheus==2.3.1 +django-taggit==6.0.0 +docker==6.0.0 +dotmap==1.3.30 +email-validator==1.3.0 +factory-boy==3.2.1 +Faker==13.3.4 +flower==2.0.0 +gql[requests]==3.5.0 +GitPython==3.1.43 +Jinja2==3.1.2 +ipdb==0.13.9 +ipython==8.10 +kubernetes==29.0.0 +prometheus_client==0.20.0 +psutil==7.0.0 +psycopg2-binary==2.9.9 +pydantic==1.10.2 +pytest==8.3.4 +pytest-asyncio==0.25.3 +pytest-django==4.5.2 +python-ldap==3.4.3 +PyJWT==2.4.0 +PyYAML==6.0.2 +redis==5.0.7 +rich==14.0.0 +sentry-sdk +slack-sdk +social-auth-app-django==5.0.0 +social-auth-core[openidconnect]==4.1.0 +stripe==10.4.0 +tenacity==9.0.0 diff --git a/src/core/api/uwsgi.yaml b/src/core/api/uwsgi.yaml new file mode 100644 index 00000000..e07ed89d --- /dev/null +++ b/src/core/api/uwsgi.yaml @@ -0,0 +1,22 @@ +# https://uwsgi-docs.readthedocs.io/en/latest/Options.html + +uwsgi: + module: datacoves.wsgi:application + master: true + pidfile: /tmp/datacoves.pid + vacuum: true + max-requests: 5000 + http-socket: 0.0.0.0:8000 + processes: 3 + harakiri: 120 + env: DJANGO_SETTINGS_MODULE=datacoves.settings + uid: 1000 + gid: 1000 + single-interpreter: true + enable-threads: true + offload-threads: %k + # required for auth2 endpoints + buffer-size: 16384 + + static-map: /static=/usr/src/app/assets + static-expires: /* 7776000 diff --git a/src/core/dbt-api/.dockerignore b/src/core/dbt-api/.dockerignore new file mode 100644 index 00000000..c348dcb3 --- /dev/null +++ b/src/core/dbt-api/.dockerignore @@ -0,0 +1,45 @@ +# Version control directories +.git/ +.gitignore + +# Compiled application & artifacts +/_build/ +/deps/ + +# Files related to test, build and CI +/test/ +/cover/ +/.fetch +/.tool-versions +/.circleci/ +/.github/ +/.vscode/ +/docker-compose.* + +# Editor directories and files +.elixir_ls/ +.elixir-tools/ +.idea/ +*.swo +*.swp +*.swn +*.swm + +# Node.js artifacts +/assets/node_modules/ + +# Ignore custom files +architecture.png +README.md +/insomnia/ + +# Ignore .env files that may contain sensitive data +.env +.env.* + +# Ignore potential secrets stored in the credentials file +config/*.secret.exs + +# Ignore system files +.DS_Store +Thumbs.db diff --git a/src/core/dbt-api/.formatter.exs b/src/core/dbt-api/.formatter.exs new file mode 100644 index 00000000..2b5a7145 --- /dev/null +++ b/src/core/dbt-api/.formatter.exs @@ -0,0 +1,6 @@ +[ + import_deps: [:ecto, :ecto_sql, :phoenix, :open_api_spex], + subdirectories: ["priv/*/migrations"], + plugins: 
[Phoenix.LiveView.HTMLFormatter], + inputs: ["*.{heex,ex,exs}", "{config,lib,test}/**/*.{heex,ex,exs}", "priv/*/seeds.exs"] +] diff --git a/src/core/dbt-api/.gitignore b/src/core/dbt-api/.gitignore new file mode 100644 index 00000000..7328a823 --- /dev/null +++ b/src/core/dbt-api/.gitignore @@ -0,0 +1,44 @@ +# The directory Mix will write compiled artifacts to. +/_build/ + +# If you run "mix test --cover", coverage assets end up here. +/cover/ + +# The directory Mix downloads your dependencies sources to. +/deps/ + +# Ignore the NextLS extension data +/.elixir-tools/ + +# Where 3rd-party dependencies like ExDoc output generated docs. +/doc/ + +# Ignore .fetch files in case you like to edit your project deps locally. +/.fetch + +/.env + +# Ignore GitGuardian's Cache +.cache_ggshield + +# If the VM crashes, it generates a dump, let's ignore it too. +erl_crash.dump + +# Also ignore archive artifacts (built via "mix archive.build"). +*.ez + +# Temporary files, for example, from tests. +/tmp/ + +# Ignore package tarball (built via "mix hex.build"). +jade-*.tar + +# Ignore assets that are produced by build tools. +/priv/static/assets/ + +# Ignore digested assets cache. +/priv/static/cache_manifest.json + +# In case you use Node.js/npm, you want to ignore these. +npm-debug.log +/assets/node_modules/ diff --git a/src/core/dbt-api/.pre-commit-config.yaml b/src/core/dbt-api/.pre-commit-config.yaml new file mode 100644 index 00000000..94d648d2 --- /dev/null +++ b/src/core/dbt-api/.pre-commit-config.yaml @@ -0,0 +1,7 @@ +repos: + - repo: https://github.com/gitguardian/ggshield + rev: v1.21.0 + hooks: + - id: ggshield + language_version: python3 + stages: [commit] diff --git a/src/core/dbt-api/.tool-versions b/src/core/dbt-api/.tool-versions new file mode 100644 index 00000000..a25d1af5 --- /dev/null +++ b/src/core/dbt-api/.tool-versions @@ -0,0 +1,3 @@ +erlang 27.2.2 +elixir 1.18.2-otp-27 +python 3.9.2 \ No newline at end of file diff --git a/src/core/dbt-api/Dockerfile b/src/core/dbt-api/Dockerfile new file mode 100644 index 00000000..a1db44d3 --- /dev/null +++ b/src/core/dbt-api/Dockerfile @@ -0,0 +1,74 @@ +# Use the official Elixir image as a base +# Find eligible builder and runner images on Docker Hub. We use Ubuntu/Debian +# instead of Alpine to avoid DNS resolution issues in production. 
+# +# https://hub.docker.com/r/hexpm/elixir/tags?page=1&name=ubuntu +# https://hub.docker.com/_/ubuntu?tab=tags +# +# This file is based on these images: +# +# - https://hub.docker.com/r/hexpm/-elixir/tags - for the build image +# - https://hub.docker.com/_/debian?tab=tags&page=1&name=bullseye-20240130-slim - for the release image +# - https://pkgs.org/ - resource for finding needed packages +# - Ex: hexpm/elixir:1.15.0-erlang-26.0.1-debian-bullseye-20240130-slim +# +ARG ELIXIR_VERSION=1.18.2 +ARG OTP_VERSION=27.2.2 +ARG DEBIAN_VERSION=bullseye-20250203-slim + +ARG BUILDER_IMAGE="hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}" + +FROM ${BUILDER_IMAGE} AS builder + +# Without this, you can't build the image on macOS +# See: https://elixirforum.com/t/arm64-dockerfile-failing/57317/12 +ENV ERL_FLAGS="+JPperf true" + +# install build dependencies +RUN apt-get update -y && apt-get install -y build-essential git \ + && apt-get clean && rm -f /var/lib/apt/lists/*_* + +# install hex + rebar +RUN mix local.hex --force && \ + mix local.rebar --force + +# Create and set the working directory +WORKDIR "/app" + +# Set the build environment for fetching the dependencies +ENV MIX_ENV="prod" + +# Copy over your mix.exs and mix.lock files to install dependencies +COPY mix.exs mix.lock ./ +RUN mix deps.get --only $MIX_ENV +RUN mkdir config + +# copy compile-time config files before we compile dependencies +# to ensure any relevant config change will trigger the dependencies +# to be re-compiled. +COPY config/config.exs config/${MIX_ENV}.exs config/ +RUN mix deps.compile + +COPY priv priv + +COPY lib lib + +COPY assets assets + +# compile assets +RUN mix assets.deploy + +# Compile the release +RUN mix compile + +# Changes to config/runtime.exs don't require recompiling the code +COPY config/runtime.exs config/ + +# Copy over the rest of your app's code +COPY . . + +# Make the run script executable +RUN chmod +x run.sh + +# Run the Phoenix server +ENTRYPOINT ["./run.sh"] diff --git a/src/core/dbt-api/README.md b/src/core/dbt-api/README.md new file mode 100644 index 00000000..5063e256 --- /dev/null +++ b/src/core/dbt-api/README.md @@ -0,0 +1,324 @@ +# Jade ([Cove](https://en.wikipedia.org/wiki/Jade_Cove)) + +An Elixir Backend that provides a public API for Datacoves' clients. + +## Design Overview + +[Walkthrough Video](https://www.loom.com/share/dcaa2c3c5d4248e3b8c30b3fcfe7c79c) + +[Figma Link](https://www.figma.com/file/zOD6fT1iO5KLwrdbM5j7Te/Datacoves---dbt-Backend?type=whiteboard&node-id=22-146&t=L4lybp69AysuEXyR-4) + +![](architecture.png) + +## Setup + +### Install Elixir and erlang + +If you haven't already, you need to install Elixir and erlang on your machine. The easiest way is to use a version manager like [asdf](https://asdf-vm.com/). However, there are plenty of other ways explained [here](https://elixir-lang.org/install.html). + +To install `asdf`, please follow [this](https://asdf-vm.com/guide/getting-started.html) guide. + +To install Elixir and erlang using `asdf`, first open a shell in the project path and simply run `asdf install`. That will install the Elixir and erlang version as specified in the `.tool-versions` file. And that's it! 🚀 + +## Developing inside k8s + +This guide explains how to develop the project as a pod inside the Kubernetes cluster. This way of developing is preferred because our development and production environment is the same. 
+ +### Symlinking this Repo to Datacoves + +First, we need to symlink this repository to the datacoves project so that we can hot-reload its code inside the k8s pod. + +To create the symlink, simply run `ln -s . ../datacoves/src/core/dbt-api`. Update the command with a path to your local datacoves folder. + +Now, you should see this repository as a folder in `src/core/dbt-api`. If not, delete whatever you see in `src/core/dbt-api` and run the command again. + +### Enabling Jade in the k8s cluster + +Inside the Datacoves project, go to the file `config/datacoveslocal.com/cluster-params.yaml` and enable the following flags: + +``` +local_dbt_api_volume: true +enable_dbt_api: true +expose_dbt_api: true +``` + +Then, run `./cli.py install`. This will create the `dbt-api-svc` service and `dbt-api` pod and expose the pod at [https://dbt.datacoveslocal.com](https://dbt.datacoveslocal.com). + +It will also mount the `src/core/dbt-api` folder into the k8s pod. So, whenever you change the code here, it will change in the k8s pod as well. + +### Configure Storage + +You can configure `dbt-api` to store all files in either `minio`, `s3`, or `(azure) blob`. + +#### Configure Minio + +To use Minio, you need to set the following environment variables: + +``` +STORAGE_ADAPTER="minio" +MINIO_BUCKET_NAME="fill-in" +MINIO_URL="fill-in" +MINIO_ACCESS_KEY="fill-in" +MINIO_SECRET_KEY="fill-in" +``` + +#### Configure S3 + +To use S3, you need to set the following environment variables: + +``` +STORAGE_ADAPTER="s3" +S3_BUCKET_NAME="fill-in" +S3_ACCESS_KEY="fill-in" +S3_SECRET_ACCESS_KEY="fill-in" +S3_REGION="fill-in" +``` + +#### Configure (Azure) Blob + +To use Blob, you need to set the following environment variables: + +``` +STORAGE_ADAPTER="blob" +BLOB_CONTAINER="fill-in" + +# Either define a connection string +BLOB_STORAGE_ACCOUNT_CONNECTION_STRING="fill-in" + +# Or define the account name and key. +# These are ignored if a connection string is present. +BLOB_STORAGE_ACCOUNT_NAME="fill-in" +BLOB_STORAGE_ACCOUNT_KEY="fill-in" +``` + +### Run a development server in k8s + +Next, you need to run a development webserver inside the k8s pod. From inside the datacoves repo, run `./cli.py pod_sh dbt-api`. + +Then, wait until you see a shell. You now have a shell inside the k8s pod. Now, run `./run.sh dev` and a development webserver will start. + +**This webserver does not start the Endpoint** because port 80 is already occupied by the Jade instance that is started automatically by the pod. So, if you need to test HTTP requests to the Jade API, run `./run.sh shell`. This will create an IEx session inside your running production Jade instance. To update its code, simply copy&paste the module that you want to update into the IEx session and hit `Enter`. This will update the code running in the production instance and when you make the next HTTP request, that code will be used. + +## Developing Locally + +**Not recommended**: Developing the project as a pod inside the Kubernetes cluster is preferred. + +The next section explains how you can develop Jade locally, that is without running it inside the Kubernetes cluster as a pod. + +### Expose the Kubernetes Postgres Pod to Localhost + +You need to connect to the Postgres pod that runs in the Kubernetes cluster to get the data from the Datacoves database and to create and use the Postgres database for this project. + +Make sure that the Datacoves Kubernetes cluster is up-and-running on your local machine. 
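
Once the port-forward described in the next steps is running, you can confirm the Datacoves database is actually reachable from your machine with a short script. This is only an illustrative sketch (dbt-api itself is Elixir); the environment variable names and defaults mirror `config/dev.exs` and `psycopg2-binary` is already pinned in `src/core/api/requirements.txt`, but adjust the values to whatever your local cluster secrets define:

```python
# Minimal connectivity check for the port-forwarded Datacoves database.
# Defaults mirror config/dev.exs; override via env vars if your setup differs.
import os

import psycopg2  # psycopg2-binary is already a core API dependency

conn = psycopg2.connect(
    host=os.getenv("DATACOVES_DB_HOST", "localhost"),
    port=int(os.getenv("DATACOVES_DB_PORT", "5432")),
    dbname=os.getenv("DATACOVES_DB_NAME", "datacoves"),
    user=os.getenv("DATACOVES_DB_USER", "postgres"),
    password=os.getenv("DATACOVES_DB_PASS", "password"),
)
with conn.cursor() as cur:
    cur.execute("SELECT version();")
    print(cur.fetchone()[0])
conn.close()
```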
+ +Check that the Postgres pod is running with: + +``` +> kubectl get pods -n core +NAME READY STATUS RESTARTS AGE +api-64d9c4776-9jw7l 1/1 Running 0 87m +beat-6bb9b8c69c-kshcm 1/1 Running 1 (67m ago) 87m +flower-59bcb596f8-cmmt9 1/1 Running 0 87m +postgres-postgresql-0 1/1 Running 0 8m16s # <- Check that this has 1/1 pods +redis-master-0 1/1 Running 0 87m +workbench-6b84984976-cjmb9 1/1 Running 0 87m +worker-68d8bff495-52pj4 1/1 Running 0 87m +``` + +Now, expose the Postgres pod to `localhost:5432` with this command: + +``` +kubectl port-forward --namespace core svc/postgres-postgresql 5432:5432 +``` + +This will start the port forwarding. You can stop it later with `CTRL + C`. + +If you're having trouble with the command above, check that you use the correct service name. Here, we use the service name `svc/postgres-postgresql`, but it might different. To find the correct service name, run this command: + +``` +> kubectl get services -n core +NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE +core-api-svc ClusterIP 10.96.69.6 80/TCP 88m +core-flower-svc ClusterIP 10.96.245.62 80/TCP 88m +core-workbench-svc ClusterIP 10.96.17.243 80/TCP 88m +postgres-postgresql ClusterIP 10.96.238.115 5432/TCP 88m # <- You'll need this NAME here +postgres-postgresql-hl ClusterIP None 5432/TCP 88m +redis-headless ClusterIP None 6379/TCP 88m +redis-master ClusterIP 10.96.123.67 6379/TCP 88m +``` + +You'll see that a service called `postgres-postgresql` is running. That's the name you should use in the `port-forward` command above, but don't forget to prefix it with `svc/SERVICE_NAME_HERE`. + +### Expose Airflow's Postgres Database to Localhost + +As of now, we fetch Airflow's data directly from its Database instead of through the API because this allows us to e.g. join different tables and offer faster query times. If you start this project locally, you need to expose the Airflow database to localhost. Here's how: + +#### Start Airflow as Superuser. + +1. Log into [api.datacoveslocal.com](https://api.datacoveslocal.com) and navigate to your User under `Users`. +2. Assign all available groups to yourself under the `Groups` section. Don't forget to hit `Save`! +3. Navigate to [datacoveslocal.com](https://datacoveslocal.com) +4. Go to `Account Administration` (small gear at the top right) -> `Environments` -> `Edit` on first entry -> `Stack Services` +5. Here make sure that the `Orchestrate` toggle is `ON`. Don't forget to hit `Save Changes`. +6. Now, you should see a few `Airflow`-pods when running `kubectl get pods --all-namespaces`. +7. Wait a minute and then go to `Account Administration` -> `Service Connections`. +8. Here, click the little lightning symbol `Test Connection`. This will test the Airflow connection. +9. Once the Airflow Connection is established successfully, the status in that row should change to `tested`. +10. Now, Airflow is running! + +#### Expose the Airflow Postgres to Localhost + +1. In a new terminal, run `kubectl port-forward --namespace dcw-dev123 svc/dev123-airflow-postgresql 5433:5432` + 1. Make sure that the namespace and service names match what you see when you run `kubectl get services --all-namespaces` +2. 
This will expose the Airflow database at `localhost:5433` (note this is not the standard `5432` port because that's where the Datacoves Postgres is already running) + +## Accessing the API + +### Start the Server + +To start your Phoenix server: + +- Run `mix setup` to install and setup dependencies +- Start Phoenix endpoint with `mix phx.server` or inside IEx with `iex -S mix phx.server` + +Now, the server should be running at `http://localhost:4000`. + +### Make Requests + +The easiest way to make requests is to install [Insomnia](https://insomnia.rest/) and to import the `requests.json` file from `./insomnia/requests.json`. That will import all endpoints and environment variables that you need. + +Otherwise, you can make requests to the following routes: + +``` +# DBT API Clone +GET /api/v2/accounts/:account_id JadeWeb.API.V2.AccountController :show +GET /api/v2/accounts/:account_id/projects JadeWeb.API.V2.ProjectController :index +GET /api/v2/accounts/:account_id/projects/:id JadeWeb.API.V2.ProjectController :show +GET /api/v2/accounts/:account_id/projects/:project_id/latest-run JadeWeb.API.V2.Project.LatestJobRunController :show +GET /api/v2/accounts/:account_id/environments JadeWeb.API.V2.EnvironmentController :index +GET /api/v2/accounts/:account_id/environments/:id JadeWeb.API.V2.EnvironmentController :show +GET /api/v2/accounts/:account_id/jobs JadeWeb.API.V2.JobController :index +GET /api/v2/accounts/:account_id/jobs/:id JadeWeb.API.V2.JobController :show +GET /api/v2/accounts/:account_id/runs JadeWeb.API.V2.JobRunController :index +GET /api/v2/accounts/:account_id/runs/:id JadeWeb.API.V2.JobRunController :show +GET /api/v2/accounts/:account_id/runs/:job_run_id/artifacts/:artifact JadeWeb.API.V2.ManifestController :show + +# Datacoves API +GET /api/v2/datacoves/manifests JadeWeb.API.Datacoves.ManifestController :show + Parameters: + Either to get the manifest for a specific DAG: + dag_id={dag_id} (e.g. "yaml_sample_dag") + environment_slug={e.g. "dev123"} + + Or to get the latest manifest for an environment: + environment_slug="e.g. dev123" + +POST /api/v2/datacoves/manifests JadeWeb.API.Datacoves.ManifestController :create + Parameters: + file={file_binary} + run_id={dag_run_id} (e.g. "manual__2023-12-02T09:49:46.105347+00:00") + environment_slug={e.g. "dev123"} + (optional) trimmed="false" for fetching the full manifest + +GET /api/v2/datacoves/environments/:environment_slug/files JadeWeb.API.Datacoves.FileController :show +POST /api/v2/datacoves/environments/:environment_slug/files JadeWeb.API.Datacoves.FileController :create +PATCH /api/v2/datacoves/environments/:environment_slug/files JadeWeb.API.Datacoves.FileController :update +PUT /api/v2/datacoves/environments/:environment_slug/files JadeWeb.API.Datacoves.FileController :update +DELETE /api/v2/datacoves/environments/:environment_slug/files JadeWeb.API.Datacoves.FileController :delete +GET /api/v2/datacoves/projects/:project_slug/latest-manifest JadeWeb.API.Datacoves.ProjectManifestController :show + Parameters: + (optional) trimmed="false" for fetching the full manifest + +# OpenApi Specs Endpoint +GET /api/v2/openapi OpenApiSpex.Plug.RenderSpec [] +GET /api/v2/swaggerui OpenApiSpex.Plug.SwaggerUI [path: "/api/v2/openapi"] + +# Heathcheck endpoint for monitoring +GET /api/v2/healthcheck JadeWeb.API.Datacoves.HealthcheckController :show + +# Legacy endpoints for "internal" calls. 
Will be removed once we migrated all calls to /api/v2/datacoves +GET /api/internal/manifests JadeWeb.API.Datacoves.ManifestController :show +POST /api/internal/manifests JadeWeb.API.Datacoves.ManifestController :create +GET /api/internal/environments/:environment_slug/files JadeWeb.API.Datacoves.FileController :show +POST /api/internal/environments/:environment_slug/files JadeWeb.API.Datacoves.FileController :create +PATCH /api/internal/environments/:environment_slug/files JadeWeb.API.Datacoves.FileController :update +PUT /api/internal/environments/:environment_slug/files JadeWeb.API.Datacoves.FileController :update +DELETE /api/internal/environments/:environment_slug/files JadeWeb.API.Datacoves.FileController :delete +GET /api/internal/projects/:project_slug/latest-manifest JadeWeb.API.Datacoves.ProjectManifestController :show +GET /api/internal/healthcheck JadeWeb.API.Datacoves.HealthcheckController :show +``` + +### Authenticate Requests + +All API endpoints (except OpenApi Specs and Healthcheck) are protected through a `Bearer Token` Authorization header. + +You can get the API Token from the Datacoves dashboard and provide it with the `Authorization` header, like this: + +``` +Authorization: Token {api_key} +``` + +You can now make requests to any endpoint, for example: + +``` +GET /api/v2/accounts/1/projects +RESPONSE BODY +{ + "data": [ + { + "id": 54, + "name": "Fake Project 879", + "url": "http://api.datacoves.com/accounts/1/projects/1", + "account_id": 1, + "updated_at": 1697906147, + "company_id": 126, + "created_at": 1697906147, + "integration_entity_id": "671", + "integration_id": 94, + "is_favorite": false, + "job_count": 409, + "workspace_id": 799 + } + ] +} +``` + +If you try to make requests to a path with another `account_id`, you'll get an error: + +``` +GET /api/v2/accounts/2/projects +RESPONSE BODY +{ + "errors": { + "message": "Invalid Account in Path. You have no accces to this account." + } +} +``` + +### Using OpenAPI Specs + +The application offers [OpenAPI](https://www.openapis.org/) specifications in two forms: + +1. as `JSON` file at: [/api/v2/openapi](http://localhost:4000/api/v2/openapi) +2. as `SwaggerUI` at: [/api/v2/swaggerui](http://localhost:4000/api/v2/swaggerui) + +You can test requests through the SwaggerUI by adding the client's `bearer token` through the `Authorize` button at the top right of the UI. After you added the token, you can execute the requests below. + +## Tests + +You can run all tests with the command: `mix test`. + +If you ever need to reset your test database, simply run: `MIX_ENV=test mix ecto.reset` + +Likewise, to reset your dev database, run: `mix ecto.reset` without the `MIX_ENV=test` prefix. + +## Docker + +To build a Docker image from the local project, run `docker compose build` + +If you want to re-build the docker image without any cached values, run `docker compose build --no-cache` + +To start the Docker image, run `docker compose up jade` + +To start both the Elixir Backend and Postgres, run `docker compose up`. +If you want to run the containers in the background, run `docker compose up -d`. 
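
As a footnote to the "Authenticate Requests" section above, here is the same `GET /accounts/:account_id/projects` call as a small script. It is only a sketch using Python's `requests` library (any HTTP client works); the base URL and token are placeholders you must replace with your own values:

```python
# Example of calling the Jade API with an account token.
# BASE_URL and API_TOKEN are placeholders -- substitute your own values.
import requests

BASE_URL = "http://localhost:4000/api/v2"
API_TOKEN = "ADD_A_CLIENT_TOKEN_HERE"

response = requests.get(
    f"{BASE_URL}/accounts/1/projects",
    headers={"Authorization": f"Token {API_TOKEN}"},
    timeout=30,
)
response.raise_for_status()

# The response body wraps records in a "data" list (see the example above).
for project in response.json()["data"]:
    print(project["id"], project["name"])
```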
diff --git a/src/core/dbt-api/architecture.png b/src/core/dbt-api/architecture.png new file mode 100644 index 00000000..29fe5d87 Binary files /dev/null and b/src/core/dbt-api/architecture.png differ diff --git a/src/core/dbt-api/assets/css/app.css b/src/core/dbt-api/assets/css/app.css new file mode 100644 index 00000000..378c8f90 --- /dev/null +++ b/src/core/dbt-api/assets/css/app.css @@ -0,0 +1,5 @@ +@import "tailwindcss/base"; +@import "tailwindcss/components"; +@import "tailwindcss/utilities"; + +/* This file is for your main application CSS */ diff --git a/src/core/dbt-api/assets/js/app.js b/src/core/dbt-api/assets/js/app.js new file mode 100644 index 00000000..df0cdd9f --- /dev/null +++ b/src/core/dbt-api/assets/js/app.js @@ -0,0 +1,41 @@ +// If you want to use Phoenix channels, run `mix help phx.gen.channel` +// to get started and then uncomment the line below. +// import "./user_socket.js" + +// You can include dependencies in two ways. +// +// The simplest option is to put them in assets/vendor and +// import them using relative paths: +// +// import "../vendor/some-package.js" +// +// Alternatively, you can `npm install some-package --prefix assets` and import +// them using a path starting with the package name: +// +// import "some-package" +// + +// Include phoenix_html to handle method=PUT/DELETE in forms and buttons. +import "phoenix_html" +// Establish Phoenix Socket and LiveView configuration. +import {Socket} from "phoenix" +import {LiveSocket} from "phoenix_live_view" +import topbar from "../vendor/topbar" + +let csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content") +let liveSocket = new LiveSocket("/live", Socket, {params: {_csrf_token: csrfToken}}) + +// Show progress bar on live navigation and form submits +topbar.config({barColors: {0: "#29d"}, shadowColor: "rgba(0, 0, 0, .3)"}) +window.addEventListener("phx:page-loading-start", _info => topbar.show(300)) +window.addEventListener("phx:page-loading-stop", _info => topbar.hide()) + +// connect if there are any LiveViews on the page +liveSocket.connect() + +// expose liveSocket on window for web console debug logs and latency simulation: +// >> liveSocket.enableDebug() +// >> liveSocket.enableLatencySim(1000) // enabled for duration of browser session +// >> liveSocket.disableLatencySim() +window.liveSocket = liveSocket + diff --git a/src/core/dbt-api/assets/tailwind.config.js b/src/core/dbt-api/assets/tailwind.config.js new file mode 100644 index 00000000..dcfb401c --- /dev/null +++ b/src/core/dbt-api/assets/tailwind.config.js @@ -0,0 +1,90 @@ +// See the Tailwind configuration guide for advanced usage +// https://tailwindcss.com/docs/configuration + +const plugin = require("tailwindcss/plugin"); +const fs = require("fs"); +const path = require("path"); + +module.exports = { + content: ["./js/**/*.js", "../lib/jade_web.ex", "../lib/jade_web/**/*.*ex"], + theme: { + extend: { + colors: { + brand: "#FD4F00", + }, + }, + }, + plugins: [ + require("@tailwindcss/forms"), + // Allows prefixing tailwind classes with LiveView classes to add rules + // only when LiveView classes are applied, for example: + // + //
+ // + plugin(({ addVariant }) => + addVariant("phx-no-feedback", [ + ".phx-no-feedback&", + ".phx-no-feedback &", + ]), + ), + plugin(({ addVariant }) => + addVariant("phx-click-loading", [ + ".phx-click-loading&", + ".phx-click-loading &", + ]), + ), + plugin(({ addVariant }) => + addVariant("phx-submit-loading", [ + ".phx-submit-loading&", + ".phx-submit-loading &", + ]), + ), + plugin(({ addVariant }) => + addVariant("phx-change-loading", [ + ".phx-change-loading&", + ".phx-change-loading &", + ]), + ), + + // Embeds Heroicons (https://heroicons.com) into your app.css bundle + // See your `CoreComponents.icon/1` for more information. + // + plugin(function ({ matchComponents, theme }) { + let iconsDir = path.join(__dirname, "../deps/heroicons/optimized"); + let values = {}; + let icons = [ + ["", "/24/outline"], + ["-solid", "/24/solid"], + ["-mini", "/20/solid"], + ]; + icons.forEach(([suffix, dir]) => { + fs.readdirSync(path.join(iconsDir, dir)).forEach((file) => { + let name = path.basename(file, ".svg") + suffix; + values[name] = { name, fullPath: path.join(iconsDir, dir, file) }; + }); + }); + matchComponents( + { + hero: ({ name, fullPath }) => { + let content = fs + .readFileSync(fullPath) + .toString() + .replace(/\r?\n|\r/g, ""); + return { + [`--hero-${name}`]: `url('data:image/svg+xml;utf8,${content}')`, + "-webkit-mask": `var(--hero-${name})`, + mask: `var(--hero-${name})`, + "mask-repeat": "no-repeat", + "background-color": "currentColor", + "vertical-align": "middle", + display: "inline-block", + width: theme("spacing.5"), + height: theme("spacing.5"), + }; + }, + }, + { values }, + ); + }), + ], +}; diff --git a/src/core/dbt-api/assets/vendor/topbar.js b/src/core/dbt-api/assets/vendor/topbar.js new file mode 100644 index 00000000..41957274 --- /dev/null +++ b/src/core/dbt-api/assets/vendor/topbar.js @@ -0,0 +1,165 @@ +/** + * @license MIT + * topbar 2.0.0, 2023-02-04 + * https://buunguyen.github.io/topbar + * Copyright (c) 2021 Buu Nguyen + */ +(function (window, document) { + "use strict"; + + // https://gist.github.com/paulirish/1579671 + (function () { + var lastTime = 0; + var vendors = ["ms", "moz", "webkit", "o"]; + for (var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) { + window.requestAnimationFrame = + window[vendors[x] + "RequestAnimationFrame"]; + window.cancelAnimationFrame = + window[vendors[x] + "CancelAnimationFrame"] || + window[vendors[x] + "CancelRequestAnimationFrame"]; + } + if (!window.requestAnimationFrame) + window.requestAnimationFrame = function (callback, element) { + var currTime = new Date().getTime(); + var timeToCall = Math.max(0, 16 - (currTime - lastTime)); + var id = window.setTimeout(function () { + callback(currTime + timeToCall); + }, timeToCall); + lastTime = currTime + timeToCall; + return id; + }; + if (!window.cancelAnimationFrame) + window.cancelAnimationFrame = function (id) { + clearTimeout(id); + }; + })(); + + var canvas, + currentProgress, + showing, + progressTimerId = null, + fadeTimerId = null, + delayTimerId = null, + addEvent = function (elem, type, handler) { + if (elem.addEventListener) elem.addEventListener(type, handler, false); + else if (elem.attachEvent) elem.attachEvent("on" + type, handler); + else elem["on" + type] = handler; + }, + options = { + autoRun: true, + barThickness: 3, + barColors: { + 0: "rgba(26, 188, 156, .9)", + ".25": "rgba(52, 152, 219, .9)", + ".50": "rgba(241, 196, 15, .9)", + ".75": "rgba(230, 126, 34, .9)", + "1.0": "rgba(211, 84, 0, .9)", + }, + shadowBlur: 10, + 
shadowColor: "rgba(0, 0, 0, .6)", + className: null, + }, + repaint = function () { + canvas.width = window.innerWidth; + canvas.height = options.barThickness * 5; // need space for shadow + + var ctx = canvas.getContext("2d"); + ctx.shadowBlur = options.shadowBlur; + ctx.shadowColor = options.shadowColor; + + var lineGradient = ctx.createLinearGradient(0, 0, canvas.width, 0); + for (var stop in options.barColors) + lineGradient.addColorStop(stop, options.barColors[stop]); + ctx.lineWidth = options.barThickness; + ctx.beginPath(); + ctx.moveTo(0, options.barThickness / 2); + ctx.lineTo( + Math.ceil(currentProgress * canvas.width), + options.barThickness / 2 + ); + ctx.strokeStyle = lineGradient; + ctx.stroke(); + }, + createCanvas = function () { + canvas = document.createElement("canvas"); + var style = canvas.style; + style.position = "fixed"; + style.top = style.left = style.right = style.margin = style.padding = 0; + style.zIndex = 100001; + style.display = "none"; + if (options.className) canvas.classList.add(options.className); + document.body.appendChild(canvas); + addEvent(window, "resize", repaint); + }, + topbar = { + config: function (opts) { + for (var key in opts) + if (options.hasOwnProperty(key)) options[key] = opts[key]; + }, + show: function (delay) { + if (showing) return; + if (delay) { + if (delayTimerId) return; + delayTimerId = setTimeout(() => topbar.show(), delay); + } else { + showing = true; + if (fadeTimerId !== null) window.cancelAnimationFrame(fadeTimerId); + if (!canvas) createCanvas(); + canvas.style.opacity = 1; + canvas.style.display = "block"; + topbar.progress(0); + if (options.autoRun) { + (function loop() { + progressTimerId = window.requestAnimationFrame(loop); + topbar.progress( + "+" + 0.05 * Math.pow(1 - Math.sqrt(currentProgress), 2) + ); + })(); + } + } + }, + progress: function (to) { + if (typeof to === "undefined") return currentProgress; + if (typeof to === "string") { + to = + (to.indexOf("+") >= 0 || to.indexOf("-") >= 0 + ? currentProgress + : 0) + parseFloat(to); + } + currentProgress = to > 1 ? 1 : to; + repaint(); + return currentProgress; + }, + hide: function () { + clearTimeout(delayTimerId); + delayTimerId = null; + if (!showing) return; + showing = false; + if (progressTimerId != null) { + window.cancelAnimationFrame(progressTimerId); + progressTimerId = null; + } + (function loop() { + if (topbar.progress("+.1") >= 1) { + canvas.style.opacity -= 0.05; + if (canvas.style.opacity <= 0.05) { + canvas.style.display = "none"; + fadeTimerId = null; + return; + } + } + fadeTimerId = window.requestAnimationFrame(loop); + })(); + }, + }; + + if (typeof module === "object" && typeof module.exports === "object") { + module.exports = topbar; + } else if (typeof define === "function" && define.amd) { + define(function () { + return topbar; + }); + } else { + this.topbar = topbar; + } +}.call(this, window, document)); diff --git a/src/core/dbt-api/build_and_push_docker_image.sh b/src/core/dbt-api/build_and_push_docker_image.sh new file mode 100755 index 00000000..bc562740 --- /dev/null +++ b/src/core/dbt-api/build_and_push_docker_image.sh @@ -0,0 +1,12 @@ +#!/bin/sh +# Exit the script immediately if any command returns a non-zero exit status. +set -e + +if [ $# -eq 0 ]; then + echo "No version given. You must provide a version (e.g. 2.1)" + exit 1 +fi + +version="$1" + +docker build . 
--tag datacovesprivate/core-dbt-api:${version} --platform=linux/amd64 --push --provenance=false --network=host diff --git a/src/core/dbt-api/command.sh b/src/core/dbt-api/command.sh new file mode 100755 index 00000000..76bcf6b9 --- /dev/null +++ b/src/core/dbt-api/command.sh @@ -0,0 +1,34 @@ +#!/bin/sh +# Exit the script immediately if any command returns a non-zero exit status. +set -e + +if [ "$1" == "forward_postgres" ]; then + + kubectl port-forward --namespace core svc/postgres-postgresql 5432:5432 + +elif [ "$1" == "forward_airflow" ]; then + + kubectl port-forward --namespace dcw-dev123 svc/dev123-airflow-postgresql 5433:5432 + +elif [ "$1" == "forward_minio" ]; then + + kubectl port-forward -n core svc/minio 9000 9001 + +elif [ "$1" == "delete_airflow" ]; then + + services=$(kubectl get services --all-namespaces -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep -E '(airflow|minio)') + deployments=$(kubectl get deployments --all-namespaces -o jsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}' | grep -E '(airflow|minio)') + + for service in $services; do + kubectl delete service $service --namespace dcw-dev123 + done + + for deployment in $deployments; do + kubectl delete deployment $deployment --namespace dcw-dev123 + done + +else + + echo "Command $1 unknown!" + +fi diff --git a/src/core/dbt-api/config/config.exs b/src/core/dbt-api/config/config.exs new file mode 100644 index 00000000..0492d377 --- /dev/null +++ b/src/core/dbt-api/config/config.exs @@ -0,0 +1,59 @@ +# This file is responsible for configuring your application +# and its dependencies with the aid of the Config module. +# +# This configuration file is loaded before any dependency and +# is restricted to this project. + +# General application configuration +import Config + +config :jade, + ecto_repos: [Jade.Repo], + generators: [timestamp_type: :utc_datetime] + +config :jade, http_adapter: HTTPoison + +# Configures the endpoint +config :jade, JadeWeb.Endpoint, + url: [host: "localhost"], + adapter: Bandit.PhoenixAdapter, + render_errors: [ + formats: [html: JadeWeb.ErrorHTML, json: JadeWeb.ErrorJSON], + layout: false + ], + pubsub_server: Jade.PubSub, + live_view: [signing_salt: "Xam9siJm"] + +# Configure esbuild (the version is required) +config :esbuild, + version: "0.17.11", + default: [ + args: + ~w(js/app.js --bundle --target=es2017 --outdir=../priv/static/assets --external:/fonts/* --external:/images/*), + cd: Path.expand("../assets", __DIR__), + env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)} + ] + +# Configure tailwind (the version is required) +config :tailwind, + version: "3.3.2", + default: [ + args: ~w( + --config=tailwind.config.js + --input=css/app.css + --output=../priv/static/assets/app.css + ), + cd: Path.expand("../assets", __DIR__) + ] + +# Configures Elixir's Logger +config :logger, :console, + format: "$time $metadata[$level] $message\n", + metadata: [:request_id] + +# Use Jason for JSON parsing in Phoenix +config :phoenix, :json_library, Jason + +# Import environment specific config. This must remain at the bottom +# of this file so it overrides the configuration defined above. 
+import_config "#{config_env()}.exs" diff --git a/src/core/dbt-api/config/dev.exs b/src/core/dbt-api/config/dev.exs new file mode 100644 index 00000000..1e44665b --- /dev/null +++ b/src/core/dbt-api/config/dev.exs @@ -0,0 +1,93 @@ +import Config + +# Configure your database +config :jade, Jade.Repo, + username: System.get_env("DB_USER", "postgres"), + password: System.get_env("DB_PASS", "password"), + hostname: System.get_env("DB_HOST", "localhost"), + database: System.get_env("DB_NAME", "jade_dev"), + port: String.to_integer(System.get_env("DB_PORT", "5432")), + stacktrace: true, + show_sensitive_data_on_connection_error: true, + pool_size: 2 + +# Configure the Database connection to Datacoves +config :jade, Datacoves.Repo, + username: System.get_env("DATACOVES_DB_USER", "postgres"), + password: System.get_env("DATACOVES_DB_PASS", "password"), + hostname: System.get_env("DATACOVES_DB_HOST", "localhost"), + database: System.get_env("DATACOVES_DB_NAME", "datacoves"), + port: String.to_integer(System.get_env("DATACOVES_DB_PORT", "5432")), + stacktrace: true, + show_sensitive_data_on_connection_error: true, + pool_size: 2 + +# For development, we disable any cache and enable +# debugging and code reloading. +# +# The watchers configuration can be used to run external +# watchers to your application. For example, we can use it +# to bundle .js and .css sources. +config :jade, JadeWeb.Endpoint, + http: [ip: {0, 0, 0, 0}, port: 4000], + check_origin: false, + code_reloader: true, + debug_errors: true, + secret_key_base: "BOu930aqRk5gana0XST7SMZifX5rnqkakWka6c0YF+QUdm80ctmm5CCO702lft6e", + watchers: [ + esbuild: {Esbuild, :install_and_run, [:default, ~w(--sourcemap=inline --watch)]}, + tailwind: {Tailwind, :install_and_run, [:default, ~w(--watch)]} + ] + +config :open_api_spex, :cache_adapter, OpenApiSpex.Plug.NoneCache + +config :jade, internal_bearer_token: "dev_internal_bearer_token" + +# ## SSL Support +# +# In order to use HTTPS in development, a self-signed +# certificate can be generated by running the following +# Mix task: +# +# mix phx.gen.cert +# +# Run `mix help phx.gen.cert` for more information. +# +# The `http:` config above can be replaced with: +# +# https: [ +# port: 4001, +# cipher_suite: :strong, +# keyfile: "priv/cert/selfsigned_key.pem", +# certfile: "priv/cert/selfsigned.pem" +# ], +# +# If desired, both `http:` and `https:` keys can be +# configured to run both http and https servers on +# different ports. + +# Watch static and templates for browser reloading. +config :jade, JadeWeb.Endpoint, + live_reload: [ + patterns: [ + ~r"priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$", + ~r"priv/gettext/.*(po)$", + ~r"lib/jade_web/(controllers|live|components)/.*(ex|heex)$" + ] + ] + +# Enable dev routes for dashboard and mailbox +config :jade, dev_routes: true + +# Do not include metadata nor timestamps in development logs +config :logger, :console, format: "[$level] $message\n" + +# Set a higher stacktrace during development. Avoid configuring such +# in production as building large stacktraces may be expensive. 
+config :phoenix, :stacktrace_depth, 20 + +# Initialize plugs at runtime for faster development compilation +config :phoenix, :plug_init_mode, :runtime + +# Include HEEx debug annotations as HTML comments in rendered markup +config :phoenix_live_view, :debug_heex_annotations, true diff --git a/src/core/dbt-api/config/prod.exs b/src/core/dbt-api/config/prod.exs new file mode 100644 index 00000000..96902670 --- /dev/null +++ b/src/core/dbt-api/config/prod.exs @@ -0,0 +1,14 @@ +import Config + +# Note we also include the path to a cache manifest +# containing the digested version of static files. This +# manifest is generated by the `mix assets.deploy` task, +# which you should run after static files are built and +# before starting your production server. +config :jade, JadeWeb.Endpoint, cache_static_manifest: "priv/static/cache_manifest.json" + +# Do not print debug messages in production +config :logger, level: :info + +# Runtime production configuration, including reading +# of environment variables, is done on config/runtime.exs. diff --git a/src/core/dbt-api/config/runtime.exs b/src/core/dbt-api/config/runtime.exs new file mode 100644 index 00000000..d36d3d05 --- /dev/null +++ b/src/core/dbt-api/config/runtime.exs @@ -0,0 +1,220 @@ +import Config + +# config/runtime.exs is executed for all environments, including +# during releases. It is executed after compilation and before the +# system starts, so it is typically used to load production configuration +# and secrets from environment variables or elsewhere. Do not define +# any compile-time configuration in here, as it won't be applied. +# The block below contains prod specific runtime configuration. + +# ## Using releases +# +# If you use `mix release`, you need to explicitly enable the server +# by passing the PHX_SERVER=true when you start it: +# +# PHX_SERVER=true bin/jade start +# +# Alternatively, you can use `mix phx.gen.release` to generate a `bin/server` +# script that automatically sets the env var above. 
+if System.get_env("PHX_SERVER") in ~w(true 1) do + config :jade, JadeWeb.Endpoint, server: true +end + +config :jade, + connect_to_airflow: System.get_env("CONNECT_TO_AIRFLOW", "false") in ~w(true 1), + start_endpoint: System.get_env("PHX_ENDPOINT", "true") in ~w(true 1) + +if config_env() == :test do + config :jade, :storage, + bucket: "jade-dev", + adapter: Jade.Storage.Minio, + minio_url: "http://localhost:9000", + minio_access_key: "minioadmin", + minio_secret_key: "minioadmin" +end + +if config_env() != :test do + config :jade, + fernet_key: System.fetch_env!("FERNET_KEY"), + datacoves_verify_url: System.get_env("DATACOVES_VERIFY_URL") + + storage_adapter = + case System.get_env("STORAGE_ADAPTER", "minio") do + "minio" -> :minio + "s3" -> :s3 + "blob" -> :blob + end + + if storage_adapter == :minio do + config :jade, :storage, + adapter: Jade.Storage.Minio, + minio_url: System.get_env("MINIO_URL", "http://localhost:9000"), + minio_access_key: System.get_env("MINIO_ACCESS_KEY", "minioadmin"), + minio_secret_key: System.get_env("MINIO_SECRET_KEY", "minioadmin"), + bucket: System.get_env("MINIO_BUCKET_NAME", "jade-dev") + end + + if storage_adapter == :s3 do + config :ex_aws, + access_key_id: [System.fetch_env!("S3_ACCESS_KEY"), :instance_role], + secret_access_key: [System.fetch_env!("S3_SECRET_ACCESS_KEY"), :instance_role], + region: System.fetch_env!("S3_REGION") + + config :jade, :storage, + adapter: Jade.Storage.S3, + bucket: System.get_env("S3_BUCKET_NAME", "jade-dev") + end + + if storage_adapter == :blob do + if connection_string = System.get_env("BLOB_STORAGE_ACCOUNT_CONNECTION_STRING") do + config :azurex, Azurex.Blob.Config, + default_container: System.fetch_env!("BLOB_CONTAINER"), + storage_account_connection_string: connection_string + else + config :azurex, Azurex.Blob.Config, + default_container: System.fetch_env!("BLOB_CONTAINER"), + storage_account_name: System.fetch_env!("BLOB_STORAGE_ACCOUNT_NAME"), + storage_account_key: System.fetch_env!("BLOB_STORAGE_ACCOUNT_KEY") + end + + config :jade, :storage, + adapter: Jade.Storage.Blob, + bucket: System.fetch_env!("BLOB_CONTAINER") + end +end + +if config_env() == :prod do + internal_bearer_token = System.fetch_env!("INTERNAL_BEARER_TOKEN") + config :jade, internal_bearer_token: internal_bearer_token + + maybe_ipv6 = if System.get_env("ECTO_IPV6") in ~w(true 1), do: [:inet6], else: [] + + maybe_jade_url = System.get_env("DB_URL") + maybe_datacoves_url = System.get_env("DATACOVES_DB_URL") + + if maybe_jade_url do + config :jade, Jade.Repo, url: maybe_jade_url + else + config :jade, Jade.Repo, + username: System.get_env("DB_USER", "postgres"), + password: System.get_env("DB_PASS", "password"), + hostname: System.get_env("DB_HOST", "localhost"), + database: System.get_env("DB_NAME", "jade_dev"), + port: String.to_integer(System.get_env("DB_PORT", "5432")), + pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10"), + socket_options: maybe_ipv6 + end + + if maybe_datacoves_url do + config :jade, Datacoves.Repo, url: maybe_datacoves_url + else + config :jade, Datacoves.Repo, + username: System.get_env("DATACOVES_DB_USER", "postgres"), + password: System.get_env("DATACOVES_DB_PASS", "password"), + hostname: System.get_env("DATACOVES_DB_HOST", "localhost"), + database: System.get_env("DATACOVES_DB_NAME", "datacoves"), + port: String.to_integer(System.get_env("DATACOVES_DB_PORT", "5432")), + pool_size: String.to_integer(System.get_env("DATACOVES_POOL_SIZE") || "10"), + socket_options: maybe_ipv6 + end + + # Configure 
optional SSL for all Repos + config :jade, Jade.Repo, + ssl: String.to_existing_atom(System.get_env("DB_SSL_ENABLED", "false")), + ssl_opts: [ + verify: :verify_none + ] + + config :jade, Datacoves.Repo, + ssl: String.to_existing_atom(System.get_env("DB_SSL_ENABLED", "false")), + ssl_opts: [ + verify: :verify_none + ] + + config :jade, Airflow.Repo, + ssl: String.to_existing_atom(System.get_env("DB_SSL_ENABLED", "false")), + ssl_opts: [ + verify: :verify_none + ] + + # The secret key base is used to sign/encrypt cookies and other secrets. + # A default value is used in config/dev.exs and config/test.exs but you + # want to use a different value for prod and you most likely don't want + # to check this value into version control, so we use an environment + # variable instead. + secret_key_base = + System.get_env("SECRET_KEY_BASE") || + raise """ + environment variable SECRET_KEY_BASE is missing. + You can generate one by calling: mix phx.gen.secret + """ + + host = System.get_env("PHX_HOST") || "example.com" + port = String.to_integer(System.get_env("PORT") || "4000") + + config :jade, JadeWeb.Endpoint, + url: [host: host, port: 443, scheme: "https"], + check_origin: [ + host, + "*.core.svc.cluster.local" + ], + http: [ + # Enable IPv6 and bind on all interfaces. + # Set it to {0, 0, 0, 0, 0, 0, 0, 1} for local network only access. + # See the documentation on https://hexdocs.pm/plug_cowboy/Plug.Cowboy.html + # for details about using IPv6 vs IPv4 and loopback vs public addresses. + ip: {0, 0, 0, 0, 0, 0, 0, 0}, + port: port + ], + secret_key_base: secret_key_base + + # ## SSL Support + # + # To get SSL working, you will need to add the `https` key + # to your endpoint configuration: + # + # config :jade, JadeWeb.Endpoint, + # https: [ + # ..., + # port: 443, + # cipher_suite: :strong, + # keyfile: System.get_env("SOME_APP_SSL_KEY_PATH"), + # certfile: System.get_env("SOME_APP_SSL_CERT_PATH") + # ] + # + # The `cipher_suite` is set to `:strong` to support only the + # latest and more secure SSL ciphers. This means old browsers + # and clients may not be supported. You can set it to + # `:compatible` for wider support. + # + # `:keyfile` and `:certfile` expect an absolute path to the key + # and cert in disk or a relative path inside priv, for example + # "priv/ssl/server.key". For all supported SSL configuration + # options, see https://hexdocs.pm/plug/Plug.SSL.html#configure/1 + # + # We also recommend setting `force_ssl` in your endpoint, ensuring + # no data is ever sent via http, always redirecting to https: + # + # config :jade, JadeWeb.Endpoint, + # force_ssl: [hsts: true] + # + # Check `Plug.SSL` for all available options in `force_ssl`. + + # ## Configuring the mailer + # + # In production you need to configure the mailer to use a different adapter. + # Also, you may need to configure the Swoosh API client of your choice if you + # are not using SMTP. Here is an example of the configuration: + # + # config :jade, Jade.Mailer, + # adapter: Swoosh.Adapters.Mailgun, + # api_key: System.get_env("MAILGUN_API_KEY"), + # domain: System.get_env("MAILGUN_DOMAIN") + # + # For this example you need include a HTTP client required by Swoosh API client. + # Swoosh supports Hackney and Finch out of the box: + # + # config :swoosh, :api_client, Swoosh.ApiClient.Hackney + # + # See https://hexdocs.pm/swoosh/Swoosh.html#module-installation for details. 
+end diff --git a/src/core/dbt-api/config/test.exs b/src/core/dbt-api/config/test.exs new file mode 100644 index 00000000..e6d12226 --- /dev/null +++ b/src/core/dbt-api/config/test.exs @@ -0,0 +1,54 @@ +import Config + +config :jade, + ecto_repos: [Jade.Repo, Datacoves.Repo] + +# Configure your database +# +# The MIX_TEST_PARTITION environment variable can be used +# to provide built-in test partitioning in CI environment. +# Run `mix help test` for more information. +config :jade, Jade.Repo, + username: "postgres", + password: System.get_env("DB_PASS", "password"), + hostname: "localhost", + database: "jade_test#{System.get_env("MIX_TEST_PARTITION")}", + pool: Ecto.Adapters.SQL.Sandbox, + pool_size: 2 + +config :jade, Datacoves.Repo, + priv: "test/datacoves_repo", + username: "postgres", + password: System.get_env("DB_PASS", "password"), + hostname: "localhost", + database: "datacoves_test#{System.get_env("MIX_TEST_PARTITION")}", + pool: Ecto.Adapters.SQL.Sandbox, + pool_size: 2 + +config :jade, Airflow.Repo, + priv: "test/airflow_repo", + username: "postgres", + password: System.get_env("DB_PASS", "password"), + hostname: "localhost", + database: "airflow_test#{System.get_env("MIX_TEST_PARTITION")}", + pool: Ecto.Adapters.SQL.Sandbox, + pool_size: 2 + +config :jade, + http_adapter: Support.Mocks.HTTPoisonMock, + datacoves_verify_url: "https://api.datacoveslocal.com/api/datacoves/verify" + +# We don't run a server during test. If one is required, +# you can enable the server option below. +config :jade, JadeWeb.Endpoint, + http: [ip: {127, 0, 0, 1}, port: 4002], + secret_key_base: "iwIf4aWKSisvWt92wor1xaAKry7fiPHRhWZQQkoA0dTFm1sN2o8Xwt4/i2Enxu22", + server: false + +config :jade, internal_bearer_token: "internal_bearer_token" + +# Print only warnings and errors during test +config :logger, level: :warning + +# Initialize plugs at runtime for faster test compilation +config :phoenix, :plug_init_mode, :runtime diff --git a/src/core/dbt-api/docker-compose.yml b/src/core/dbt-api/docker-compose.yml new file mode 100644 index 00000000..3f1d1be9 --- /dev/null +++ b/src/core/dbt-api/docker-compose.yml @@ -0,0 +1,30 @@ +version: "3" +services: + jade: + image: datacovesprivate/core-dbt-api:2.1 + build: + context: . + dockerfile: Dockerfile + environment: + INTERNAL_BEARER_TOKEN: dev_internal_bearer_token + SECRET_KEY_BASE: local_super_secret_key_base + IEX_COOKIE: monster + CONNECT_TO_AIRFLOW: false + DB_HOST: host.docker.internal + DATACOVES_DB_HOST: host.docker.internal + ports: + - 4000:4000 + minio: + image: bitnami/minio:2023.11.20 + ports: + - 9000:9000 + - 9001:9001 + environment: + - MINIO_ROOT_USER=minioadmin + - MINIO_ROOT_PASSWORD=minioadmin + postgres: + image: postgres:15.3 + ports: + - 5432:5432 + environment: + - POSTGRES_PASSWORD=password diff --git a/src/core/dbt-api/docs/big-eye-release.md b/src/core/dbt-api/docs/big-eye-release.md new file mode 100644 index 00000000..850c2499 --- /dev/null +++ b/src/core/dbt-api/docs/big-eye-release.md @@ -0,0 +1,118 @@ +# Playbook for releasing dbt-api + +Once BigEye is ready to fetch our data through a dbt cloud-like API, we need to do the following steps to enable them: + +## Enabling dbt-api in Production +First, we need to start the `dbt-api` pod in the production cluster. + +### Sync the Secrets + +In the cluster environment, sync the secrets with `./cli sync_secrets`. Make sure that you have the `core-dbt-api.env` file in the `config/{cluster_domain}/secrets` folder. 
+ +If needed, create a new S3 bucket to use for uploading the manifests of the production cluster. There's a development one in 1Password. + +Next, set `dbt-api` to use S3 instead of Minio for uploading the manifests. Set the following environment variables in the `core-dbt-api.env`: + +``` +STORAGE_ADAPTER=s3 +S3_BUCKET_NAME=fill_in +S3_ACCESS_KEY=fill_in +S3_SECRET_ACCESS_KEY=fill_in +S3_REGION=fill_in +``` + +Also, double-check the `DB_*` and `DATACOVES_DB_*` environment variables. They need to point to the same Postgres database that Datacoves is using. All database env variables should be the same except for the `DB_NAME` and `DATACOVES_DB_NAME` ones. + +### Enabling dbt-api + +To start the `dbt-api` pod, change the `cluster-params.yml` like this: + +``` +enable_dbt_api: true +expose_dbt_api: true +``` + +Then run `./cli.py setup_core`. This will start the `dbt-api` pod and expose it through a service to the URL + +``` +dbt.{cluster_domain} +``` + +You can test that `dbt-api` is running by checking out the API docs at: + +``` +dbt.{cluster_domain}/api/v2/swaggerui +``` + +## Uploading Manifests + +### Checking the `dbt-coves` version + +Before starting Airflow, make sure that you've released a new version of `dbt-coves` after [this PR](https://github.com/datacoves/dbt-coves/pull/427) was merged. Make sure that Airflow is started with that `dbt-coves` version. Otherwise, it won't upload the manifests to `dbt-api`. + +### Checking network policies + +We're currently blocking network requests between an environment and the `core` namespace. Make sure that you have a network policy in place that allows HTTP requests from the Airflow worker or namespace to `dbt-api`. Otherwise, your manifest upload will fail with a timeout. + +### Checking the datacoves version + +Make sure that you've created a new release of `datacoves` after [this PR](https://github.com/datacoves/datacoves/pull/413) was merged. Otherwise, you can't enable the manifest upload. + +### Finally enabling the upload + +To enable the manifest upload for all DAGRuns in an environment, open the Environment in the Admin Panel and set `upload_manifest: true` in the `Airflow Config`. Hit `Sync cluster` afterwards to sync the new settings. Datacoves will now add three new environment variables to the Airflow worker which `dbt-coves` will check after every `dbt-coves dbt` command. If the command created a `manifest.json`, it will upload it to `dbt-api`. + +## Giving access to BigEye + +At this point, we offer all records for an account through the API at `dbt.{cluster_domain}/api/v2`. All BigEye needs is a `Token`` per `Account`. + +### Creating a Token + +To create a Token for an account, create a shell connection to `dbt-api` with `./cli pod_sh dbt-api` and run `./run.sh shell`. Now, you're inside the production `dbt-api` pod. + +First, you need the `account.id` for which you want to create a Token. The `id` is the integer id you can see in the Admin Panel when you go to `Accounts`. The first account has the id `1` and so on. 
You can also find the account id if you open the Account for editing in the Admin panel and look at the URL: + +``` +# The "account/1" part in the URL means this account has the id: 1 +https://api.datacoveslocal.com/panel/users/account/1/change/ +``` + +Now, you can create a new Token with this command from inside the shell: + +``` +iex> Jade.Auth.TokenRepo.create(%{account_id: ACCOUNT_ID}) +# This will give you such a result: +{:ok, + %Jade.Auth.Token{ + __meta__: #Ecto.Schema.Metadata<:loaded, "tokens">, + id: 1, + account_id: 1, + key: "long-string-here", + key_hash: "another-long-string-here", + inserted_at: ~U[2023-12-22 17:16:16Z], + updated_at: ~U[2023-12-22 17:16:16Z] + }} +``` + +The `key` here is your API Token. Give this to BigEye. They can only query records for account of the Token, so in this case, they can only query for records for the account `1`. If they need more than one account, they need to use more than one token. + +**Watch out: You will never see the token again!** + +Make sure to copy the `key` after you've created it. Only a hash of the token is stored and used for comparison. + +### Using the Token + +BigEye (or any other customer) must provide the token `key` from above as a `Bearer` token in their HTTP Request. + + +### Deleting a Token + +To delete a token, simply run these commands: + +```elixir +{:ok, token} = Jade.Auth.TokenRepo.get_by_account_id(ACCOUNT_ID) +# Then +{:ok, _token} = Jade.Auth.TokenRepo.delete(token) +``` + +This will delete the token and the next HTTP request won't be authorized anymore. \ No newline at end of file diff --git a/src/core/dbt-api/insomnia/requests.json b/src/core/dbt-api/insomnia/requests.json new file mode 100644 index 00000000..161785f0 --- /dev/null +++ b/src/core/dbt-api/insomnia/requests.json @@ -0,0 +1,320 @@ +{ + "_type": "export", + "__export_format": 4, + "__export_date": "2023-10-23T08:12:17.876Z", + "__export_source": "insomnia.desktop.app:v8.3.0", + "resources": [ + { + "_id": "req_e0776e4623df4d53ba0ce8290f8e24c0", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1698048689883, + "created": 1697903649680, + "url": "{{ _.base_url }}/accounts/{{ _.account_id }}/projects", + "name": "Projects - List", + "description": "", + "method": "GET", + "body": {}, + "parameters": [], + "headers": [{ "name": "User-Agent", "value": "insomnia/8.3.0" }], + "authentication": { + "type": "bearer", + "token": "{{ _.token }}", + "disabled": false + }, + "metaSortKey": -1697903649680, + "isPrivate": false, + "settingStoreCookies": true, + "settingSendCookies": true, + "settingDisableRenderRequestBody": false, + "settingEncodeUrl": true, + "settingRebuildPath": true, + "settingFollowRedirects": "global", + "_type": "request" + }, + { + "_id": "wrk_2461613d77194df08f34f2755d63016d", + "parentId": null, + "modified": 1697903282430, + "created": 1697903282430, + "name": "Jade Local", + "description": "", + "scope": "collection", + "_type": "workspace" + }, + { + "_id": "req_63852ce3759e4eebab75adcdd6b40071", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1697904379527, + "created": 1697903732278, + "url": "{{ _.base_url }}/accounts/{{ _.account_id }}/projects/27", + "name": "Projects - Show", + "description": "", + "method": "GET", + "body": {}, + "parameters": [], + "headers": [{ "name": "User-Agent", "value": "insomnia/8.3.0" }], + "authentication": { "type": "bearer", "token": "{{ _.token }}" }, + "metaSortKey": -1697903604133.875, + "isPrivate": false, + "settingStoreCookies": true, + 
"settingSendCookies": true, + "settingDisableRenderRequestBody": false, + "settingEncodeUrl": true, + "settingRebuildPath": true, + "settingFollowRedirects": "global", + "_type": "request" + }, + { + "_id": "req_5c7cb128578d48849b4829a09b8bb783", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1697903753815, + "created": 1697903750945, + "url": "{{ _.base_url }}/accounts/{{ _.account_id }}/jobs", + "name": "Jobs - List", + "description": "", + "method": "GET", + "body": {}, + "parameters": [], + "headers": [{ "name": "User-Agent", "value": "insomnia/8.3.0" }], + "authentication": { "type": "bearer", "token": "{{ _.token }}" }, + "metaSortKey": -1697903558587.75, + "isPrivate": false, + "settingStoreCookies": true, + "settingSendCookies": true, + "settingDisableRenderRequestBody": false, + "settingEncodeUrl": true, + "settingRebuildPath": true, + "settingFollowRedirects": "global", + "_type": "request" + }, + { + "_id": "req_759df01281fb4fd4bb8342636900ee22", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1697903767098, + "created": 1697903760201, + "url": "{{ _.base_url }}/accounts/{{ _.account_id }}/jobs/6", + "name": "Jobs - Show", + "description": "", + "method": "GET", + "body": {}, + "parameters": [], + "headers": [{ "name": "User-Agent", "value": "insomnia/8.3.0" }], + "authentication": { "type": "bearer", "token": "{{ _.token }}" }, + "metaSortKey": -1697903513041.625, + "isPrivate": false, + "settingStoreCookies": true, + "settingSendCookies": true, + "settingDisableRenderRequestBody": false, + "settingEncodeUrl": true, + "settingRebuildPath": true, + "settingFollowRedirects": "global", + "_type": "request" + }, + { + "_id": "req_1fddee7149d64c448091f649d64f93fa", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1697903794460, + "created": 1697903775650, + "url": "{{ _.base_url }}/accounts/{{ _.account_id }}/runs", + "name": "JobRuns - List", + "description": "", + "method": "GET", + "body": {}, + "parameters": [], + "headers": [{ "name": "User-Agent", "value": "insomnia/8.3.0" }], + "authentication": { "type": "bearer", "token": "{{ _.token }}" }, + "metaSortKey": -1697903456108.9688, + "isPrivate": false, + "settingStoreCookies": true, + "settingSendCookies": true, + "settingDisableRenderRequestBody": false, + "settingEncodeUrl": true, + "settingRebuildPath": true, + "settingFollowRedirects": "global", + "_type": "request" + }, + { + "_id": "req_2a25f1ca663e4eae8879d1b0a0d2a49b", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1697903791750, + "created": 1697903782972, + "url": "{{ _.base_url }}/accounts/{{ _.account_id }}/runs/7", + "name": "JobRuns - Show", + "description": "", + "method": "GET", + "body": {}, + "parameters": [], + "headers": [{ "name": "User-Agent", "value": "insomnia/8.3.0" }], + "authentication": { "type": "bearer", "token": "{{ _.token }}" }, + "metaSortKey": -1697903399176.3125, + "isPrivate": false, + "settingStoreCookies": true, + "settingSendCookies": true, + "settingDisableRenderRequestBody": false, + "settingEncodeUrl": true, + "settingRebuildPath": true, + "settingFollowRedirects": "global", + "_type": "request" + }, + { + "_id": "req_6d6f1cb5be7f431980f8d92bf76f90f0", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1698048589015, + "created": 1697903285311, + "url": "{{ _.internal_url }}/tokens", + "name": "Token - Create", + "description": "", + "method": "POST", + "body": { + "mimeType": "application/json", + "text": "{\n\t\"token\": 
{\n\t\t\"account_id\": 1\n\t}\n}" + }, + "parameters": [ + { + "id": "pair_6c2d5a1c22204269b7c1c4fd72ad0e50", + "name": "", + "value": "", + "description": "" + } + ], + "headers": [ + { "name": "Content-Type", "value": "application/json" }, + { "name": "User-Agent", "value": "insomnia/8.3.0" } + ], + "authentication": { + "type": "bearer", + "token": "{{ _.internal_bearer_token }}" + }, + "metaSortKey": -1697903285311, + "isPrivate": false, + "settingStoreCookies": true, + "settingSendCookies": true, + "settingDisableRenderRequestBody": false, + "settingEncodeUrl": true, + "settingRebuildPath": true, + "settingFollowRedirects": "global", + "_type": "request" + }, + { + "_id": "req_7ffe20e7504b47f8ae407015ab9e916e", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1698048621552, + "created": 1698048574857, + "url": "{{ _.internal_url }}/tokens/2", + "name": "Token - Update/Replace", + "description": "", + "method": "PUT", + "body": {}, + "parameters": [ + { + "id": "pair_6c2d5a1c22204269b7c1c4fd72ad0e50", + "name": "", + "value": "", + "description": "" + } + ], + "headers": [{ "name": "User-Agent", "value": "insomnia/8.3.0" }], + "authentication": { + "type": "bearer", + "token": "{{ _.internal_bearer_token }}" + }, + "metaSortKey": -1697820751672.75, + "isPrivate": false, + "settingStoreCookies": true, + "settingSendCookies": true, + "settingDisableRenderRequestBody": false, + "settingEncodeUrl": true, + "settingRebuildPath": true, + "settingFollowRedirects": "global", + "_type": "request" + }, + { + "_id": "req_746e1b609a1c4ba88ee9178db18ce373", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1697904247167, + "created": 1697903803098, + "url": "{{ _.internal_url }}/tokens/1", + "name": "Token - Delete", + "description": "", + "method": "DELETE", + "body": { + "mimeType": "application/json", + "text": "{\n\t\"token\": {\n\t\t\"account_id\": 1\n\t}\n}" + }, + "parameters": [ + { + "id": "pair_6c2d5a1c22204269b7c1c4fd72ad0e50", + "name": "", + "value": "", + "description": "" + } + ], + "headers": [ + { "name": "Content-Type", "value": "application/json" }, + { "name": "User-Agent", "value": "insomnia/8.3.0" } + ], + "authentication": { + "type": "bearer", + "token": "{{ _.internal_bearer_token }}" + }, + "metaSortKey": -1697738218034.5, + "isPrivate": false, + "settingStoreCookies": true, + "settingSendCookies": true, + "settingDisableRenderRequestBody": false, + "settingEncodeUrl": true, + "settingRebuildPath": true, + "settingFollowRedirects": "global", + "_type": "request" + }, + { + "_id": "env_e17f8f7f8c2e1e7ee6525fe8b3ec64de0a9043e0", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1697903282431, + "created": 1697903282431, + "name": "Base Environment", + "data": {}, + "dataPropertyOrder": null, + "color": null, + "isPrivate": false, + "metaSortKey": 1697903282431, + "_type": "environment" + }, + { + "_id": "jar_e17f8f7f8c2e1e7ee6525fe8b3ec64de0a9043e0", + "parentId": "wrk_2461613d77194df08f34f2755d63016d", + "modified": 1697903282432, + "created": 1697903282432, + "name": "Default Jar", + "cookies": [], + "_type": "cookie_jar" + }, + { + "_id": "env_905e9d52400a4a6ebf8559895c64dd97", + "parentId": "env_e17f8f7f8c2e1e7ee6525fe8b3ec64de0a9043e0", + "modified": 1698048706597, + "created": 1697903301037, + "name": "Jade Local", + "data": { + "base_url": "http://localhost:4000/api/v2", + "internal_url": "http://localhost:4000/api/v2/datacoves", + "internal_bearer_token": "dev_internal_bearer_token", + "token": 
"ADD_A_CLIENT_TOKEN_FROM_POST_TOKEN_REQUEST", + "account_id": "1" + }, + "dataPropertyOrder": { + "&": [ + "base_url", + "internal_url", + "internal_bearer_token", + "token", + "account_id" + ] + }, + "color": null, + "isPrivate": false, + "metaSortKey": 1697903301037, + "_type": "environment" + } + ] +} diff --git a/src/core/dbt-api/lib/airflow/airflow.ex b/src/core/dbt-api/lib/airflow/airflow.ex new file mode 100644 index 00000000..86c13a47 --- /dev/null +++ b/src/core/dbt-api/lib/airflow/airflow.ex @@ -0,0 +1,19 @@ +defmodule Airflow do + def repository do + quote do + import Ecto.Query, warn: false + alias Airflow.Repo + end + end + + def schema do + quote do + use Ecto.Schema + import Ecto.Changeset + end + end + + defmacro __using__(which) when is_atom(which) do + apply(__MODULE__, which, []) + end +end diff --git a/src/core/dbt-api/lib/airflow/dag_runs/dag_run.ex b/src/core/dbt-api/lib/airflow/dag_runs/dag_run.ex new file mode 100644 index 00000000..11e0e6d4 --- /dev/null +++ b/src/core/dbt-api/lib/airflow/dag_runs/dag_run.ex @@ -0,0 +1,32 @@ +defmodule Airflow.DagRuns.DagRun do + @moduledoc """ + The DAGRun schema as stored in the Airflow Postgres database. + + The "State" is defined here: https://github.com/apache/airflow/blob/d4002261b57236ffdca9a5790097f295794965cf/airflow/utils/state.py#L73 + """ + use Airflow, :schema + + @valid_statuses [:queued, :running, :success, :failed] + + schema "dag_run" do + belongs_to :dag, Airflow.Dags.Dag, references: :dag_id, type: :string + + field :execution_date, :utc_datetime + field :state, Ecto.Enum, values: @valid_statuses + field :run_id, :string + field :external_trigger, :boolean + field :conf, :binary + field :end_date, :utc_datetime + field :start_date, :utc_datetime + field :run_type, :string + field :last_scheduling_decision, :utc_datetime + field :dag_hash, :string + field :creating_job_id, :integer + field :queued_at, :utc_datetime + field :data_interval_start, :utc_datetime + field :data_interval_end, :utc_datetime + field :log_template_id, :integer + end + + def valid_statuses(), do: @valid_statuses +end diff --git a/src/core/dbt-api/lib/airflow/dag_runs/dag_run_repo.ex b/src/core/dbt-api/lib/airflow/dag_runs/dag_run_repo.ex new file mode 100644 index 00000000..4aa0b8bb --- /dev/null +++ b/src/core/dbt-api/lib/airflow/dag_runs/dag_run_repo.ex @@ -0,0 +1,79 @@ +defmodule Airflow.DagRuns.DagRunRepo do + @moduledoc """ + The Repository for fetching DAGRuns from the Airflow Database. 
+ """ + use Airflow, :repository + + alias Airflow.DagRuns.DagRun + + def list(environment) do + Repo.with_dynamic_repo( + environment, + fn -> Repo.all(DagRun) end, + fn -> [] end + ) + end + + def get(environment, dag_run_id) do + Repo.with_dynamic_repo( + environment, + fn -> + DagRun + |> Repo.get(dag_run_id) + |> Repo.normalize_one() + end + ) + end + + def get_by(environment, attrs) do + Repo.with_dynamic_repo( + environment, + fn -> + DagRun + |> where(^attrs) + |> Repo.one() + |> Repo.normalize_one() + end + ) + end + + def get_most_recent(environment, state \\ nil) do + Repo.with_dynamic_repo( + environment, + fn -> + from(dag_run in DagRun, + as: :dag_run, + where: not is_nil(dag_run.end_date), + order_by: [desc_nulls_last: dag_run.end_date], + limit: 1 + ) + |> maybe_filter_state(state) + |> Repo.one() + |> Repo.normalize_one() + end + ) + end + + def get_most_recent_for_dag(environment, dag_id, state \\ nil) do + Repo.with_dynamic_repo( + environment, + fn -> + from(dag_run in DagRun, + as: :dag_run, + where: dag_run.dag_id == ^dag_id, + order_by: [desc_nulls_last: dag_run.end_date], + limit: 1 + ) + |> maybe_filter_state(state) + |> Repo.one() + |> Repo.normalize_one() + end + ) + end + + defp maybe_filter_state(query, nil), do: query + + defp maybe_filter_state(query, state) do + where(query, [dag_run: dag_run], dag_run.state == ^state) + end +end diff --git a/src/core/dbt-api/lib/airflow/dags/dag.ex b/src/core/dbt-api/lib/airflow/dags/dag.ex new file mode 100644 index 00000000..7fb39d3d --- /dev/null +++ b/src/core/dbt-api/lib/airflow/dags/dag.ex @@ -0,0 +1,44 @@ +defmodule Airflow.Dags.Dag do + @moduledoc """ + The DAG schema as stored in the Airflow Postgres database. + """ + use Airflow, :schema + + @primary_key false + schema "dag" do + field :dag_id, :string, primary_key: true + field :default_view, :string + field :description, :string + field :fileloc, :string + field :has_import_errors, :boolean, default: false + field :has_task_concurrency_limits, :boolean + field :is_active, :boolean, default: true + field :is_paused, :boolean, default: false + field :is_subdag, :boolean, default: false + field :last_expired, :utc_datetime + field :last_parsed_time, :utc_datetime + field :last_pickled, :utc_datetime + field :max_active_runs, :integer + field :max_active_tasks, :integer + field :next_dagrun_create_after, :utc_datetime + field :next_dagrun_data_interval_end, :utc_datetime + field :next_dagrun_data_interval_start, :utc_datetime + field :next_dagrun, :utc_datetime + field :owners, :string + field :pickle_id, :integer + field :root_dag_id, :string + field :schedule_interval, :string + field :scheduler_lock, :boolean + field :timetable_description, :string + + has_many :dag_runs, Airflow.DagRuns.DagRun, foreign_key: :dag_id, references: :dag_id + + has_one :most_recent_dag_run, Airflow.DagRuns.DagRun, + foreign_key: :dag_id, + references: :dag_id + + has_one :most_recent_completed_dag_run, Airflow.DagRuns.DagRun, + foreign_key: :dag_id, + references: :dag_id + end +end diff --git a/src/core/dbt-api/lib/airflow/dags/dag_repo.ex b/src/core/dbt-api/lib/airflow/dags/dag_repo.ex new file mode 100644 index 00000000..9e6dd35a --- /dev/null +++ b/src/core/dbt-api/lib/airflow/dags/dag_repo.ex @@ -0,0 +1,27 @@ +defmodule Airflow.Dags.DagRepo do + @moduledoc """ + The Repository for fetching DAGs from the Airflow Database. 
+ """ + use Airflow, :repository + + alias Airflow.Dags.Dag + + def list(environment, _params) do + Repo.with_dynamic_repo( + environment, + fn -> Repo.all(Dag) end, + fn -> [] end + ) + end + + def get(environment, dag_id) do + Repo.with_dynamic_repo( + environment, + fn -> + Dag + |> Repo.get(dag_id) + |> Repo.normalize_one() + end + ) + end +end diff --git a/src/core/dbt-api/lib/airflow/repo.ex b/src/core/dbt-api/lib/airflow/repo.ex new file mode 100644 index 00000000..ec1fd7c2 --- /dev/null +++ b/src/core/dbt-api/lib/airflow/repo.ex @@ -0,0 +1,48 @@ +defmodule Airflow.Repo do + use Ecto.Repo, + otp_app: :jade, + adapter: Ecto.Adapters.Postgres, + read_only: Mix.env() != :test + + require Ecto.Query + + @doc """ + Sets the correct dynamic repo on Airflow.Repo before making + a Database Request to an Airflow Postgres pod. If no Repo runs + for the given Environment, Airflow.Repos tries to start a new one + but returns `{:error, :not_found}` if it fails. Also, if the + environment has services -> airflow disabled, it returns + `{:error, :not_found}` immediately. + """ + def with_dynamic_repo(environment, callback, fallback \\ nil) do + case Airflow.Repos.get_repo_for_environment(environment) do + {:ok, pid} -> + Airflow.Repo.put_dynamic_repo(pid) + callback.() + + {:error, :not_found, _message} = error -> + if fallback, do: fallback.(), else: error + end + end + + def normalize_one(nil), do: {:error, :not_found} + def normalize_one(record), do: {:ok, record} + + def paginate(query, params) do + query + |> maybe_add_offset(params) + |> maybe_add_limit(params) + end + + defp maybe_add_offset(query, %{offset: offset}) when is_integer(offset) and offset >= 0 do + Ecto.Query.offset(query, ^offset) + end + + defp maybe_add_offset(query, _params), do: query + + defp maybe_add_limit(query, %{limit: limit}) when is_integer(limit) and limit >= 0 do + Ecto.Query.limit(query, ^limit) + end + + defp maybe_add_limit(query, _params), do: query +end diff --git a/src/core/dbt-api/lib/airflow/repos.ex b/src/core/dbt-api/lib/airflow/repos.ex new file mode 100644 index 00000000..1aad437d --- /dev/null +++ b/src/core/dbt-api/lib/airflow/repos.ex @@ -0,0 +1,136 @@ +defmodule Airflow.Repos do + @moduledoc """ + We may have multiple Postgres databases in a cluster, one per Environment. + + This module starts dynamic Repos to connect to them, one Repo per Database, + that means one Repo per Environment. + + The database might not be available yet when we start this application + because the Kubernetes pods are started in parallel. If a database is + unavailable when the Repo is started, the Repo will not be started. We + test - very stupidly - whether a Postgres pod is available by making a + GET http request to it. + + If an Environment is created after this process started and we try to fetch + the Repo pid for it, the Supervisor starts a new Repo if the environment + has Airflow enabled. Otherwise, it returns `{:error, :not_found}`. + + Any call to the Postgres databases must go through this module. 
+ """ + use Supervisor + + @me __MODULE__ + @http_adapter Application.compile_env(:jade, :http_adapter) + + require Logger + + alias Airflow.Repo + + alias Jade.Environments.EnvironmentRepo + alias Jade.Environments.Environment + + def start_link(args) do + Supervisor.start_link(@me, [args], name: Keyword.get(args, :name, @me)) + end + + def init(_args) do + repos = build_repo_specs() + Supervisor.init(repos, strategy: :one_for_one) + end + + @spec get_repo_for_environment(Environment.t()) :: {:ok, pid()} | {:error, :not_found, binary()} + def get_repo_for_environment(%Environment{} = environment, supervisor \\ @me) do + supervisor + |> Supervisor.which_children() + |> Enum.find(fn {id, _pid, _type, _repo} -> id == environment.slug end) + |> maybe_return_repo(environment, supervisor) + end + + defp build_repo_specs() do + EnvironmentRepo.list() + |> Enum.map(&do_build_repo_spec/1) + |> Enum.reject(&is_nil/1) + end + + # Connect to an active Airflow database in Production + def do_build_repo_spec( + %Environment{ + services: %{ + "airflow" => %{"enabled" => true} + }, + airflow_config: %{"db" => %{"external" => true} = db_config}, + slug: slug + } = _environment + ) do + %{ + "host" => host, + "port" => port, + "user" => username, + "password" => password, + "database" => database + } = db_config + + url = "postgresql://#{username}:#{password}@#{host}:#{port}/#{database}" + config = build_connection(slug, url, Mix.env()) + %{id: slug, start: {Repo, :start_link, [config]}} + end + + # Connect to an active Airflow database in development or test + def do_build_repo_spec( + %Environment{ + services: %{ + "airflow" => %{"enabled" => true} + }, + slug: slug + } = _environment + ) do + domain = "#{slug}-airflow-postgresql.dcw-#{slug}.svc.cluster.local:5432" + + case @http_adapter.get(domain) do + # A :closed error means that the Postgres pod is available but refused our connecton request + # But we can connect to it. + {:error, %HTTPoison.Error{reason: :closed}} -> + url = "postgresql://postgres:postgres@#{domain}/postgres" + config = build_connection(slug, url, Mix.env()) + %{id: slug, start: {Repo, :start_link, [config]}} + + # Any other error means that the Postgres pod is not yet available and we shouldn't + # start a Repo for it. 
+ _error -> + Logger.error("Cannot connect to Airflow repo at: #{domain}") + nil + end + end + + def do_build_repo_spec(%Environment{} = _environment), do: nil + + defp maybe_return_repo({_id, repo_pid, _type, _repo}, _environment, _supervisor) do + {:ok, repo_pid} + end + + defp maybe_return_repo(nil, environment, supervisor) do + case do_build_repo_spec(environment) do + nil -> {:error, :not_found, "Airflow Repo for Environment #{environment.slug} not found."} + child_spec -> Supervisor.start_child(supervisor, child_spec) + end + end + + # In test tests, connect to the local postgres as defined in test.exs instead + defp build_connection(name, _url, :test) do + query_args = ["SET search_path TO #{name}", []] + [name: nil, restart: :transient, pool_size: 1, after_connect: {Postgrex, :query!, query_args}] + end + + # In dev and prod, connect to the external postgres pod + defp build_connection(_name, url, _env) do + sanitized_url = url |> String.split(~r/[:@]/) |> List.replace_at(2, "hidden") |> Enum.join("") + Logger.info("Trying to connect to Airflow Repo at: #{sanitized_url}") + + [ + name: nil, + restart: :transient, + pool_size: 2, + url: url + ] + end +end diff --git a/src/core/dbt-api/lib/airflow/repos_test.exs b/src/core/dbt-api/lib/airflow/repos_test.exs new file mode 100644 index 00000000..9d26f1f3 --- /dev/null +++ b/src/core/dbt-api/lib/airflow/repos_test.exs @@ -0,0 +1,68 @@ +defmodule Airflow.ReposTest do + use Jade.DataCase, async: false + + alias Airflow.Repos + alias Jade.Environments.EnvironmentRepo + + describe "init/1" do + test "starts a new repo per airflow-enabled and available environment" do + _env_airflow_disabled = + insert(:environment, services: %{"airflow" => %{"enabled" => false}}, slug: "disabled") + + _env_airflow_enabled_wo_pods = + insert(:environment, + services: %{"airflow" => %{"enabled" => true}}, + slug: "enabled-wo-pods" + ) + + _env_airflow_enabled_with_pods = + insert(:environment, services: %{"airflow" => %{"enabled" => true}}, slug: "enabled") + + assert capture_log(fn -> + pid = start_supervised!({Repos, name: :test1}) + + # Starts an Airflow.Repo connection for the "enabled" environment. 
+ assert [{"enabled", _pid, :worker, [Airflow.Repo]}] = + Supervisor.which_children(pid) + end) =~ "Cannot connect to Airflow repo at:" + end + end + + describe "get_repo_for_environment/2" do + test "returns the repo for an enabled environment" do + insert(:environment, services: %{"airflow" => %{"enabled" => true}}, slug: "enabled") + [enabled_env] = EnvironmentRepo.list() + + pid = start_supervised!({Repos, name: :test2}) + assert [{"enabled", repo_pid, :worker, [Airflow.Repo]}] = Supervisor.which_children(pid) + + {:ok, res_pid} = Repos.get_repo_for_environment(enabled_env, pid) + assert repo_pid == res_pid + end + + test "returns an error of the env is not enabled" do + insert(:environment, services: %{"airflow" => %{"enabled" => false}}, slug: "disabled") + [disabled_env] = EnvironmentRepo.list() + + pid = start_supervised!({Repos, name: :test3}) + + {:error, :not_found, "Airflow Repo for Environment disabled not found."} = + Repos.get_repo_for_environment(disabled_env, pid) + end + + test "starts a new repo if an environment is enabled but no repo was started during init" do + pid = start_supervised!({Repos, name: :test4}) + # After init, no child Repos were started + assert [] = Supervisor.which_children(pid) + + insert(:environment, services: %{"airflow" => %{"enabled" => true}}, slug: "enabled") + [enabled_env] = EnvironmentRepo.list() + + {:ok, res_pid} = Repos.get_repo_for_environment(enabled_env, pid) + + # After the first call, a child Repos was started + assert [{"enabled", repo_pid, :worker, [Airflow.Repo]}] = Supervisor.which_children(pid) + assert repo_pid == res_pid + end + end +end diff --git a/src/core/dbt-api/lib/datacoves/accounts/account.ex b/src/core/dbt-api/lib/datacoves/accounts/account.ex new file mode 100644 index 00000000..c1d9de92 --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/accounts/account.ex @@ -0,0 +1,27 @@ +defmodule Datacoves.Accounts.Account do + @moduledoc """ + The Account schema as stored in the Datacoves Postgres database. + """ + use Datacoves, :schema + + schema "users_account" do + field :approve_billing_events, :boolean + field :cancelled_subscription, :map + field :created_at, :utc_datetime + field :created_by_id, :integer + field :customer_id, :string + field :deactivated_at, :utc_datetime + field :developer_licenses, :integer + field :name, :string + field :notifications_enabled, :map + field :plan_id, :integer + field :settings, :map + field :slug, :string + field :subscription_updated_at, :utc_datetime + field :subscription, :map + field :trial_ends_at, :utc_datetime + field :trial_started_at, :utc_datetime + field :updated_at, :utc_datetime + field :workers_execution_limit, :map + end +end diff --git a/src/core/dbt-api/lib/datacoves/accounts/account_repo.ex b/src/core/dbt-api/lib/datacoves/accounts/account_repo.ex new file mode 100644 index 00000000..747a9802 --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/accounts/account_repo.ex @@ -0,0 +1,23 @@ +defmodule Datacoves.Accounts.AccountRepo do + @moduledoc """ + The Project repository for fetching Account data from + the Datacoves Postgres database. + + The Datacoves.Repo connection is read-only, so this + repository offers no write operations. + + Our API receives requests from individual users/accounts only, + so we don't need to list all accounts, but only fetch the + account of the requesting user. That's why we only have a `get/1` + function and no `list/1` function as in other repositories. 
+ """ + use Datacoves, :repository + + alias Datacoves.Accounts.Account + + def get(account_id) do + Account + |> Repo.get(account_id) + |> Repo.normalize_one() + end +end diff --git a/src/core/dbt-api/lib/datacoves/auth_tokens/auth_token.ex b/src/core/dbt-api/lib/datacoves/auth_tokens/auth_token.ex new file mode 100644 index 00000000..de75c85f --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/auth_tokens/auth_token.ex @@ -0,0 +1,14 @@ +defmodule Datacoves.AuthTokens.AuthToken do + @moduledoc """ + The AuthToken schema as stored in the Datacoves Postgres database. + """ + use Datacoves, :schema + + @primary_key false + schema "authtoken_token" do + field :key, :string, primary_key: true + field :created, :utc_datetime + + belongs_to :user, Datacoves.Users.User + end +end diff --git a/src/core/dbt-api/lib/datacoves/auth_tokens/auth_token_repo.ex b/src/core/dbt-api/lib/datacoves/auth_tokens/auth_token_repo.ex new file mode 100644 index 00000000..99d462d4 --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/auth_tokens/auth_token_repo.ex @@ -0,0 +1,21 @@ +defmodule Datacoves.AuthTokens.AuthTokenRepo do + @moduledoc """ + The AuthToken repository for fetching tokens from the Datacoves database. + """ + + use Datacoves, :repository + + alias Datacoves.AuthTokens.AuthToken + + @default_preloads [ + user: [:permissions, groups: [extended_group: [:account, :project, :environment]]] + ] + + def get_by(attrs, preloads \\ @default_preloads) do + AuthToken + |> where(^attrs) + |> preload(^preloads) + |> Repo.one() + |> Repo.normalize_one() + end +end diff --git a/src/core/dbt-api/lib/datacoves/datacoves.ex b/src/core/dbt-api/lib/datacoves/datacoves.ex new file mode 100644 index 00000000..321c7e9d --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/datacoves.ex @@ -0,0 +1,19 @@ +defmodule Datacoves do + def repository do + quote do + import Ecto.Query, warn: false + alias Datacoves.Repo + end + end + + def schema do + quote do + use Ecto.Schema + import Ecto.Changeset + end + end + + defmacro __using__(which) when is_atom(which) do + apply(__MODULE__, which, []) + end +end diff --git a/src/core/dbt-api/lib/datacoves/environments/environment.ex b/src/core/dbt-api/lib/datacoves/environments/environment.ex new file mode 100644 index 00000000..26acbd46 --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/environments/environment.ex @@ -0,0 +1,63 @@ +defmodule Datacoves.Environments.Environment do + @moduledoc """ + The Environment Schema as stored in the Datacoves database. 
+ """ + use Datacoves, :schema + + alias __MODULE__ + + schema "projects_environment" do + field :airbyte_config, :binary + field :airflow_config, :binary + field :cluster_id, :integer + field :dbt_docs_config, :binary + field :dbt_home_path, :string + field :dbt_profiles_dir, :string + field :docker_config_secret_name, :string + field :docker_config, :binary + field :docker_registry, :string + field :internal_services, :map + field :minio_config, :binary + field :name, :string + field :pomerium_config, :binary + field :profile_id, :integer + field :quotas, :map + field :release_id, :integer + field :release_profile, :string + field :services, :map + field :settings, :map + field :slug, :string + field :superset_config, :binary + field :sync, :boolean + field :type, :string + field :update_strategy, :string + field :workspace_generation, :integer + + belongs_to :project, Datacoves.Projects.Project + + field :created_at, :utc_datetime + field :updated_at, :utc_datetime + end + + @doc """ + Decrypts a Fernet encrypted field from the Datacoves environment. + """ + def decrypt_json_field!(%Environment{} = environment, field) do + Mix.env() + |> do_decrypt_json_field(environment, field) + end + + defp do_decrypt_json_field(:test, _environment, _field) do + %{ + "db" => %{"external" => false} + } + end + + defp do_decrypt_json_field(_env, environment, field) do + ciphertext = Map.get(environment, field) + plaintext = Fernet.verify!(ciphertext, key: fernet_key(), enforce_ttl: false) + Jason.decode!(plaintext) + end + + defp fernet_key(), do: Application.get_env(:jade, :fernet_key) +end diff --git a/src/core/dbt-api/lib/datacoves/environments/environment_repo.ex b/src/core/dbt-api/lib/datacoves/environments/environment_repo.ex new file mode 100644 index 00000000..c4a69d3b --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/environments/environment_repo.ex @@ -0,0 +1,55 @@ +defmodule Datacoves.Environments.EnvironmentRepo do + @moduledoc """ + The Environment repository for fetching Environment data from + the Datacoves Postgres database. + + The Datacoves.Repo connection is read-only, so this + repository offers no write operations. 
+ """ + use Datacoves, :repository + + alias Datacoves.Environments.Environment + + @default_preloads [:project] + + def list(), do: Environment |> Repo.all() |> Repo.preload(@default_preloads) + + def list(%{account_id: account_id, project_id: project_id} = params) do + account_id + |> base_query() + |> where([project: project], project.id == ^project_id) + |> Repo.paginate(params) + |> Repo.all() + end + + def list(%{account_id: account_id} = params) do + account_id + |> base_query() + |> Repo.paginate(params) + |> Repo.all() + end + + def get(account_id, environment_id) do + account_id + |> base_query() + |> Repo.get(environment_id) + |> Repo.normalize_one() + end + + def get_by(attrs) do + Environment + |> where(^attrs) + |> preload(^@default_preloads) + |> Repo.one() + |> Repo.normalize_one() + end + + defp base_query(account_id) do + from(environment in Environment, + join: project in assoc(environment, :project), + as: :project, + where: project.account_id == ^account_id, + preload: ^@default_preloads + ) + end +end diff --git a/src/core/dbt-api/lib/datacoves/groups/extended_group.ex b/src/core/dbt-api/lib/datacoves/groups/extended_group.ex new file mode 100644 index 00000000..3c0171b9 --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/groups/extended_group.ex @@ -0,0 +1,18 @@ +defmodule Datacoves.Groups.ExtendedGroup do + @moduledoc """ + The ExtendedGroup schema as stored in the Datacoves database. + """ + + use Datacoves, :schema + + schema "users_extendedgroup" do + field :name, :string + field :identity_groups, {:array, :string} + field :role, :string + + belongs_to :group, Datacoves.Groups.Group + belongs_to :account, Datacoves.Accounts.Account + belongs_to :environment, Datacoves.Environments.Environment + belongs_to :project, Datacoves.Projects.Project + end +end diff --git a/src/core/dbt-api/lib/datacoves/groups/group.ex b/src/core/dbt-api/lib/datacoves/groups/group.ex new file mode 100644 index 00000000..bfe1aa81 --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/groups/group.ex @@ -0,0 +1,14 @@ +defmodule Datacoves.Groups.Group do + @moduledoc """ + The Group schema as stored in the Datacoves database. + """ + + use Datacoves, :schema + + schema "auth_group" do + field :name, :string + + many_to_many :users, Datacoves.Users.User, join_through: "users_user_groups" + has_one :extended_group, Datacoves.Groups.ExtendedGroup + end +end diff --git a/src/core/dbt-api/lib/datacoves/permissions/permission.ex b/src/core/dbt-api/lib/datacoves/permissions/permission.ex new file mode 100644 index 00000000..4ca056eb --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/permissions/permission.ex @@ -0,0 +1,15 @@ +defmodule Datacoves.Permissions.Permission do + @moduledoc """ + The Permission schema as stored in the Datacoves database. + """ + + use Datacoves, :schema + + schema "auth_permission" do + field :name, :string + field :content_type_id, :integer + field :codename, :string + + many_to_many :users, Datacoves.Users.User, join_through: "users_user_user_permissions" + end +end diff --git a/src/core/dbt-api/lib/datacoves/projects/project.ex b/src/core/dbt-api/lib/datacoves/projects/project.ex new file mode 100644 index 00000000..01b45b36 --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/projects/project.ex @@ -0,0 +1,25 @@ +defmodule Datacoves.Projects.Project do + @moduledoc """ + The Project schema as stored in the Datacoves Postgres database. 
+ """ + use Datacoves, :schema + + schema "projects_project" do + field :ci_home_url, :string + field :ci_provider, :string + field :clone_strategy, :string + field :deploy_credentials, :binary + field :deploy_key_id, :integer + field :name, :string + field :release_branch, :string + field :repository_id, :integer + field :settings, :map + field :slug, :string + field :validated_at, :utc_datetime + field :created_at, :utc_datetime + field :updated_at, :utc_datetime + + belongs_to :account, Datacoves.Accounts.Account + has_many :environments, Datacoves.Environments.Environment + end +end diff --git a/src/core/dbt-api/lib/datacoves/projects/project_repo.ex b/src/core/dbt-api/lib/datacoves/projects/project_repo.ex new file mode 100644 index 00000000..911608cb --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/projects/project_repo.ex @@ -0,0 +1,26 @@ +defmodule Datacoves.Projects.ProjectRepo do + @moduledoc """ + The Project repository for fetching Project data from + the Datacoves Postgres database. + + The Datacoves.Repo connection is read-only, so this + repository offers no write operations. + """ + use Datacoves, :repository + + alias Datacoves.Projects.Project + + def list(%{account_id: account_id} = params) do + Project + |> where(account_id: ^account_id) + |> Repo.paginate(params) + |> Repo.all() + end + + def get_by(attrs) do + Project + |> where(^attrs) + |> Repo.one() + |> Repo.normalize_one() + end +end diff --git a/src/core/dbt-api/lib/datacoves/repo.ex b/src/core/dbt-api/lib/datacoves/repo.ex new file mode 100644 index 00000000..7b0fb4b2 --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/repo.ex @@ -0,0 +1,29 @@ +defmodule Datacoves.Repo do + use Ecto.Repo, + otp_app: :jade, + adapter: Ecto.Adapters.Postgres, + read_only: Mix.env() != :test + + require Ecto.Query + + def normalize_one(nil), do: {:error, :not_found} + def normalize_one(record), do: {:ok, record} + + def paginate(query, params) do + query + |> maybe_add_offset(params) + |> maybe_add_limit(params) + end + + defp maybe_add_offset(query, %{offset: offset}) when is_integer(offset) and offset >= 0 do + Ecto.Query.offset(query, ^offset) + end + + defp maybe_add_offset(query, _params), do: query + + defp maybe_add_limit(query, %{limit: limit}) when is_integer(limit) and limit >= 0 do + Ecto.Query.limit(query, ^limit) + end + + defp maybe_add_limit(query, _params), do: query +end diff --git a/src/core/dbt-api/lib/datacoves/users/user.ex b/src/core/dbt-api/lib/datacoves/users/user.ex new file mode 100644 index 00000000..67487c59 --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/users/user.ex @@ -0,0 +1,28 @@ +defmodule Datacoves.Users.User do + @moduledoc """ + The User schema as stored in the Datacoves database. 
+ """ + use Datacoves, :schema + + schema "users_user" do + field :eid, :binary_id + field :created_at, :utc_datetime + field :updated_at, :utc_datetime + field :password, :string + field :last_login, :utc_datetime + field :email, :string + field :name, :string + field :avatar, :string + field :deactivated_at, :utc_datetime + field :is_superuser, :boolean + field :settings, :map + field :is_service_account, :boolean + field :slug, :string + + has_many :auth_tokens, Datacoves.AuthTokens.AuthToken + + many_to_many :permissions, Datacoves.Permissions.Permission, join_through: "users_user_user_permissions" + + many_to_many :groups, Datacoves.Groups.Group, join_through: "users_user_groups" + end +end diff --git a/src/core/dbt-api/lib/datacoves/users/user_repo.ex b/src/core/dbt-api/lib/datacoves/users/user_repo.ex new file mode 100644 index 00000000..7b1178b4 --- /dev/null +++ b/src/core/dbt-api/lib/datacoves/users/user_repo.ex @@ -0,0 +1,17 @@ +defmodule Datacoves.Users.UserRepo do + @moduledoc """ + The repository for the Datacoves' User schema. + """ + + use Datacoves, :repository + + alias Datacoves.Users.User + + def get_extended_groups(%User{} = user) do + user = Repo.preload(user, groups: [:extended_group]) + + user.groups + |> Enum.map(fn group -> group.extended_group end) + |> Enum.reject(&is_nil/1) + end +end diff --git a/src/core/dbt-api/lib/jade.ex b/src/core/dbt-api/lib/jade.ex new file mode 100644 index 00000000..06ff337b --- /dev/null +++ b/src/core/dbt-api/lib/jade.ex @@ -0,0 +1,30 @@ +defmodule Jade do + @moduledoc """ + Jade keeps the contexts that define your domain + and business logic. + + Contexts are also responsible for managing your data, regardless + if it comes from the database, an external API or others. + """ + + def repository do + quote do + import Ecto.Query, warn: false + alias Jade.Repo + end + end + + def schema do + quote do + use Ecto.Schema + import Ecto.Changeset + end + end + + @doc """ + When used, dispatch to the appropriate schema etc. + """ + defmacro __using__(which) when is_atom(which) do + apply(__MODULE__, which, []) + end +end diff --git a/src/core/dbt-api/lib/jade/accounts/account.ex b/src/core/dbt-api/lib/jade/accounts/account.ex new file mode 100644 index 00000000..2ffd1bb3 --- /dev/null +++ b/src/core/dbt-api/lib/jade/accounts/account.ex @@ -0,0 +1,50 @@ +defmodule Jade.Accounts.Account do + @moduledoc """ + The dbt Cloud Account Schema. Its data comes from the Datacoves database. 
+ """ + use Jade, :schema + + @primary_key false + embedded_schema do + field :id, :integer + field :name, :string + field :state, :integer + field :plan, :string + field :pending_cancel, :boolean + field :run_slots, :integer + field :developer_seats, :integer + field :it_seats, :integer + field :read_only_seats, :integer + field :pod_memory_request_mebibytes, :integer + field :run_duration_limit_seconds, :integer + field :queue_limit, :integer + field :stripe_customer_id, :integer + field :metronome_customer_id, :integer + field :salesforce_customer_id, :integer + field :third_party_billing, :boolean + field :billing_email_address, :string + field :locked, :boolean + field :lock_reason, :string + field :lock_cause, :string + field :develop_file_system, :boolean + field :unlocked_at, :utc_datetime + field :unlock_if_subscription_renewed, :boolean + field :enterprise_authentication_method, :string + field :enterprise_login_slug, :string + field :enterprise_unique_identifier, :string + field :business_critical, :boolean + # field :groups, :string <- Add later if needed. + field :created_at, :utc_datetime + field :updated_at, :utc_datetime + field :starter_repo_url, :string + field :git_auth_level, :string + field :identifier, :string + field :trial_end_date, :utc_datetime + field :static_subdomain, :string + field :run_locked_until, :utc_datetime + # Deprecated + field :docs_job_id, :integer + # Deprecated + field :freshness_job_id, :integer + end +end diff --git a/src/core/dbt-api/lib/jade/accounts/account_repo.ex b/src/core/dbt-api/lib/jade/accounts/account_repo.ex new file mode 100644 index 00000000..f40d1edf --- /dev/null +++ b/src/core/dbt-api/lib/jade/accounts/account_repo.ex @@ -0,0 +1,62 @@ +defmodule Jade.Accounts.AccountRepo do + @moduledoc """ + The Repository for JobRuns. 
+ """ + + alias Datacoves.Accounts.AccountRepo + alias Jade.Accounts.Account, as: JadeAccount + alias Datacoves.Accounts.Account, as: DatacovesAccount + + @spec get(integer()) :: {:ok, JobRun.t()} | {:error, :not_found} + def get(account_id) do + with {:ok, datacoves_account} <- AccountRepo.get(account_id) do + {:ok, convert(datacoves_account)} + end + end + + defp convert(accounts) when is_list(accounts) do + Enum.map(accounts, &convert/1) + end + + defp convert(%DatacovesAccount{} = account) do + %JadeAccount{ + id: account.id, + billing_email_address: nil, + business_critical: nil, + created_at: account.created_at, + develop_file_system: nil, + developer_seats: account.developer_licenses, + docs_job_id: nil, + enterprise_authentication_method: nil, + enterprise_login_slug: nil, + enterprise_unique_identifier: nil, + freshness_job_id: nil, + git_auth_level: nil, + identifier: account.slug, + it_seats: nil, + lock_cause: nil, + lock_reason: nil, + locked: nil, + metronome_customer_id: nil, + name: account.name, + pending_cancel: nil, + plan: account.plan_id, + pod_memory_request_mebibytes: nil, + queue_limit: nil, + read_only_seats: nil, + run_duration_limit_seconds: nil, + run_locked_until: nil, + run_slots: nil, + salesforce_customer_id: nil, + starter_repo_url: nil, + state: nil, + static_subdomain: nil, + stripe_customer_id: nil, + third_party_billing: nil, + trial_end_date: account.trial_ends_at, + unlock_if_subscription_renewed: nil, + unlocked_at: nil, + updated_at: account.updated_at + } + end +end diff --git a/src/core/dbt-api/lib/jade/application.ex b/src/core/dbt-api/lib/jade/application.ex new file mode 100644 index 00000000..0f7b5a35 --- /dev/null +++ b/src/core/dbt-api/lib/jade/application.ex @@ -0,0 +1,41 @@ +defmodule Jade.Application do + # See https://hexdocs.pm/elixir/Application.html + # for more information on OTP Applications + @moduledoc false + + use Application + + @impl true + def start(_type, _args) do + connect_to_airflow = Application.get_env(:jade, :connect_to_airflow) + start_endpoint = Application.get_env(:jade, :start_endpoint) + + children = + [ + JadeWeb.Telemetry, + {Phoenix.PubSub, name: Jade.PubSub}, + Jade.Repo, + Datacoves.Repo, + Jade.Manifests.Cleaner + ] ++ airflow(connect_to_airflow) ++ endpoint(start_endpoint) + + # See https://hexdocs.pm/elixir/Supervisor.html + # for other strategies and supported options + opts = [strategy: :one_for_one, name: Jade.Supervisor] + Supervisor.start_link(children, opts) + end + + defp airflow(true), do: [Airflow.Repos] + defp airflow(false), do: [] + + defp endpoint(true), do: [JadeWeb.Endpoint] + defp endpoint(false), do: [] + + # Tell Phoenix to update the endpoint configuration + # whenever the application is updated. + @impl true + def config_change(changed, _new, removed) do + JadeWeb.Endpoint.config_change(changed, removed) + :ok + end +end diff --git a/src/core/dbt-api/lib/jade/auth.ex b/src/core/dbt-api/lib/jade/auth.ex new file mode 100644 index 00000000..0d2f1c2a --- /dev/null +++ b/src/core/dbt-api/lib/jade/auth.ex @@ -0,0 +1,33 @@ +defmodule Jade.Auth do + @moduledoc """ + Verifies an ApiKey against the Datacoves API and fetches its details. + """ + + require Logger + + @http_adapter Application.compile_env(:jade, :http_adapter) + + @doc """ + Fetches permissions for a user through the Datacoves token verify API. 
+ """ + def fetch_api_key_details(bearer_token) do + headers = [{"Authorization", "Token #{bearer_token}"}, {"Content-Type", "application/json"}] + + case @http_adapter.get(verify_url(), headers) do + {:ok, %HTTPoison.Response{status_code: status_code, body: body}} when status_code < 400 -> + {:ok, Jason.decode!(body)} + + {:ok, %HTTPoison.Response{status_code: 404}} -> + {:error, :api_key_not_found} + + {:ok, %HTTPoison.Response{status_code: status_code, body: body}} -> + Logger.error("Verifying ApiKey returned #{status_code} with #{inspect(body)}") + {:error, :invalid_api_key} + + {:error, %HTTPoison.Error{reason: reason}} -> + {:error, reason} + end + end + + defp verify_url(), do: Application.get_env(:jade, :datacoves_verify_url) +end diff --git a/src/core/dbt-api/lib/jade/environments/environment.ex b/src/core/dbt-api/lib/jade/environments/environment.ex new file mode 100644 index 00000000..9e1165bc --- /dev/null +++ b/src/core/dbt-api/lib/jade/environments/environment.ex @@ -0,0 +1,32 @@ +defmodule Jade.Environments.Environment do + @moduledoc """ + The dbt Cloud Environment Schema. Its data comes from the Datacoves database. + """ + use Jade, :schema + + embedded_schema do + field :account_id, :integer + field :project_id, :integer + field :connection_id, :integer + # belongs_to :connection, NotImplemented + field :credentials_id, :integer + field :created_by_id, :integer + field :extended_attributes_id, :integer + field :repository_id, :integer + # belongs_to :repository, NotImplemented + field :name, :string + field :slug, :string + field :airflow_config, :map + field :dbt_project_subdirectory, :string + field :services, :map + field :use_custom_branch, :string + field :custom_branch, :string + field :dbt_version, :string + field :raw_dbt_version, :string + field :supports_docs, :boolean, default: false + field :state, :integer + field :custom_environment_variables, :string + field :created_at, :utc_datetime + field :updated_at, :utc_datetime + end +end diff --git a/src/core/dbt-api/lib/jade/environments/environment_repo.ex b/src/core/dbt-api/lib/jade/environments/environment_repo.ex new file mode 100644 index 00000000..edfbbe05 --- /dev/null +++ b/src/core/dbt-api/lib/jade/environments/environment_repo.ex @@ -0,0 +1,65 @@ +defmodule Jade.Environments.EnvironmentRepo do + @moduledoc """ + The Repository for Environments. 
+ """ + + alias Datacoves.Environments.Environment, as: DatacovesEnvironment + alias Datacoves.Environments.EnvironmentRepo + + alias Jade.Environments.Environment + + @spec list() :: [Environment.t()] + def list(), do: EnvironmentRepo.list() |> convert() + + @spec list(map()) :: list(Environment.t()) + def list(params) do + EnvironmentRepo.list(params) |> convert() + end + + @spec get(integer(), integer()) :: {:ok, Environment.t()} | {:error, :not_found} + def get(account_id, environment_id) do + with {:ok, datacoves_environment} <- EnvironmentRepo.get(account_id, environment_id) do + {:ok, convert(datacoves_environment)} + end + end + + @spec get_by_slug(binary()) :: {:ok, Environment.t()} | {:error, :not_found} + def get_by_slug(slug) do + with {:ok, datacoves_environment} <- EnvironmentRepo.get_by(slug: slug) do + {:ok, convert(datacoves_environment)} + end + end + + defp convert(environments) when is_list(environments) do + Enum.map(environments, &convert/1) + end + + defp convert(%DatacovesEnvironment{} = environment) do + airflow_config = DatacovesEnvironment.decrypt_json_field!(environment, :airflow_config) + + %Environment{ + id: environment.id, + account_id: environment.project.account_id, + connection_id: environment.id, + project_id: environment.project_id, + credentials_id: nil, + created_by_id: nil, + extended_attributes_id: nil, + repository_id: nil, + name: environment.name, + slug: environment.slug, + airflow_config: airflow_config, + dbt_project_subdirectory: environment.dbt_home_path, + services: environment.services, + use_custom_branch: false, + custom_branch: nil, + dbt_version: nil, + raw_dbt_version: nil, + supports_docs: nil, + state: nil, + custom_environment_variables: nil, + created_at: environment.created_at, + updated_at: environment.updated_at + } + end +end diff --git a/src/core/dbt-api/lib/jade/files/file.ex b/src/core/dbt-api/lib/jade/files/file.ex new file mode 100644 index 00000000..602c1f61 --- /dev/null +++ b/src/core/dbt-api/lib/jade/files/file.ex @@ -0,0 +1,35 @@ +defmodule Jade.Files.File do + use Jade, :schema + + @primary_key {:slug, :binary_id, autogenerate: true} + @derive {Phoenix.Param, key: :slug} + schema "files" do + field(:tag, :string) + field(:filename, :string) + field(:environment_slug, :string) + field(:contents, :string, virtual: true) + field(:path, :string) + + timestamps(type: :utc_datetime) + end + + @doc false + def changeset(file, attrs) do + file + |> cast(attrs, [:tag, :filename, :environment_slug, :contents]) + |> validate_required([:tag, :filename, :environment_slug, :contents]) + |> unique_constraint([:environment_slug, :tag], error_key: :tag) + |> put_path() + end + + defp put_path(%Ecto.Changeset{valid?: true} = changeset) do + environment_slug = get_field(changeset, :environment_slug) + tag = changeset |> get_field(:tag) |> String.replace(~r/[\s_:.+]/, "-") + filename = changeset |> get_field(:filename) |> String.replace(~r/[\s_:+]/, "-") + + path = "/environments/#{environment_slug}/files/#{tag}/#{filename}" + put_change(changeset, :path, path) + end + + defp put_path(changeset), do: changeset +end diff --git a/src/core/dbt-api/lib/jade/files/file_repo.ex b/src/core/dbt-api/lib/jade/files/file_repo.ex new file mode 100644 index 00000000..dab7a54a --- /dev/null +++ b/src/core/dbt-api/lib/jade/files/file_repo.ex @@ -0,0 +1,186 @@ +defmodule Jade.Files.FileRepo do + @moduledoc """ + The repository for Files. 
+ """ + + use Jade, :repository + require Logger + import Ecto.Query + + alias Jade.Files.File + alias Jade.Storage + + @doc """ + Get a file by its slug. + """ + def get_file!(slug), do: Repo.get!(File, slug) + + @doc """ + Get file by its attributes. + """ + def get_file_by(attrs) do + query = from(f in File) + + query = + attrs + |> Enum.reduce(query, fn {k, v}, query -> where(query, [f], field(f, ^k) == ^v) end) + |> order_by([f], desc: f.inserted_at) + |> limit(1) + + query + |> Repo.all() + |> Repo.normalize_one() + end + + @doc """ + Gets files by its attributes. + """ + def get_files_by(attrs) do + query = from(f in File) + + query = + attrs + |> Enum.reduce(query, fn {k, v}, query -> where(query, [f], field(f, ^k) == ^v) end) + |> order_by([f], desc: f.inserted_at) + + query + |> Repo.all() + |> Repo.normalize_all() + end + + @doc """ + Get and download a file by its attributes. + """ + def get_and_download_file_by(attrs) do + with {:ok, file} <- get_file_by(attrs) do + download_file(file) + end + end + + @doc """ + Creates file and uploads them to storage + """ + def create_file(attrs \\ %{}) do + Ecto.Multi.new() + |> Ecto.Multi.run(:file, fn _repo, _changes -> do_create_file(attrs) end) + |> Ecto.Multi.run(:upload, fn _repo, %{file: file} -> + case upload_file(file) do + :ok -> {:ok, file} + {:error, _status, reason} -> {:error, reason} + end + end) + |> Repo.transaction() + |> case do + {:ok, %{file: file}} -> + {:ok, file} + + {:error, :file, changeset, _} -> + {:error, changeset} + + {:error, :upload, _reason, _} -> + {:error, :unprocessable_entity, "File upload failed. Please check the error in the logs."} + end + end + + @doc """ + Creates multiple files and uploads them to storage + """ + def create_files(files) do + files + |> Enum.reduce_while([], fn file_params, acc -> + case create_file(file_params) do + {:ok, file} -> + {:cont, [file | acc]} + + error -> + Logger.error("Creating multiple files failed: #{inspect(error)}") + delete_files(acc) + {:halt, error} + end + end) + |> case do + {:error, _msg} = error -> error + {:error, _status, reason} -> {:error, reason} + files -> {:ok, Enum.reverse(files)} + end + end + + defp do_create_file(attrs) do + %File{} + |> File.changeset(attrs) + |> Repo.insert() + end + + @doc """ + Updates a file and uploads it to storage + """ + def update_file(file, attrs \\ %{}) do + Ecto.Multi.new() + |> Ecto.Multi.run(:file, fn _repo, _changes -> do_update_file(file, attrs) end) + |> Ecto.Multi.run(:upload, fn _repo, %{file: file} -> + case upload_file(file) do + :ok -> {:ok, file} + {:error, _status, reason} -> {:error, reason} + end + end) + |> Repo.transaction() + |> case do + {:ok, %{file: file}} -> + {:ok, file} + + {:error, :file, changeset, _} -> + {:error, changeset} + + {:error, :upload, _reason, _} -> + {:error, :unprocessable_entity, "File upload failed. Please check the error in the logs."} + end + end + + defp do_update_file(file, attrs) do + file + |> File.changeset(attrs) + |> Repo.update() + end + + @doc """ + Deletes a file and its contents from storage + """ + def delete_file(%File{} = file) do + case Storage.delete(file.path) do + :ok -> + Repo.delete(file) + + {:error, :http_request_failed} -> + {:error, :unprocessable_entity, + "Deletion of file contents from storage failed. 
File record and contents remain."} + end + end + + def delete_files(files) do + Enum.each(files, &delete_file/1) + end + + defp upload_file(file) do + case Storage.upload(file.path, file.contents) do + :ok -> + :ok + + {:error, :http_request_failed} -> + delete_file(file) + {:error, :unprocessable_entity, "File upload failed. Please check the error in the logs."} + end + end + + defp download_file(file) do + case Storage.download(file.path) do + {:ok, contents} -> + {:ok, Map.put(file, :contents, contents)} + + {:error, :not_found} -> + {:error, :not_found, "File found but could not download contents from storage bucket."} + + {:error, :http_request_failed} -> + {:error, :unprocessable_entity, "The request to the storage bucket failed."} + end + end +end diff --git a/src/core/dbt-api/lib/jade/files/file_repo_test.exs b/src/core/dbt-api/lib/jade/files/file_repo_test.exs new file mode 100644 index 00000000..a3f98b51 --- /dev/null +++ b/src/core/dbt-api/lib/jade/files/file_repo_test.exs @@ -0,0 +1,93 @@ +defmodule Jade.Files.FileRepoTest do + use Jade.DataCase, async: true + + alias Jade.Files.File + alias Jade.Files.FileRepo + + @invalid_attrs %{tag: nil, filename: nil, path: nil} + describe "get_file_by/1" do + test "returns the file with the given attributes" do + file = insert(:file) + + # Ensure the file is not returned with its content + file = %{file | contents: nil} + assert {:ok, ^file} = FileRepo.get_file_by(tag: file.tag) + assert {:ok, ^file} = FileRepo.get_file_by(filename: file.filename) + assert {:ok, ^file} = FileRepo.get_file_by(path: file.path) + end + + test "returns not found error if no file matches the attributes" do + assert {:error, :not_found} == FileRepo.get_file_by(tag: "nonexistent") + end + end + + describe "get_files_by/1" do + test "returns the files with the given attributes" do + file1 = insert(:file, %{tag: "tag1", filename: "file1.txt", path: "path1"}) + file2 = insert(:file, %{tag: "tag1", filename: "file1.txt", path: "path1"}) + _ignore_file = insert(:file, %{tag: "tag2", filename: "file2.txt", path: "path2"}) + + # Ensure the file is not returned with its content + file1 = %{file1 | contents: nil} + file2 = %{file2 | contents: nil} + assert {:ok, [^file1, ^file2]} = FileRepo.get_files_by(tag: "tag1") + assert {:ok, [^file1, ^file2]} = FileRepo.get_files_by(filename: "file1.txt") + assert {:ok, [^file1, ^file2]} = FileRepo.get_files_by(path: "path1") + end + + test "returns not fount error if no files match the attributes" do + assert {:error, :not_found} == FileRepo.get_files_by(tag: "nonexistent") + end + end + + test "create_file/1 with valid data creates a file" do + valid_attrs = %{ + tag: "some tag", + filename: "some filename.pdf", + path: "some path", + contents: "testtesttest", + environment_slug: "test_environment" + } + + assert {:ok, %File{} = file} = FileRepo.create_file(valid_attrs) + assert file.tag == "some tag" + assert file.filename == "some filename.pdf" + assert file.environment_slug == "test_environment" + assert file.path == "/environments/test_environment/files/some-tag/some-filename.pdf" + end + + test "create_file/1 with invalid data returns error changeset" do + assert {:error, %Ecto.Changeset{}} = FileRepo.create_file(@invalid_attrs) + end + + test "update_file/2 with valid data updates the file" do + file = insert(:file) + + update_attrs = %{ + tag: "some updated tag", + filename: "some updated filename.json", + environment_slug: "env234", + contents: "test123test" + } + + assert {:ok, %File{} = updated_file} = 
FileRepo.update_file(file, update_attrs) + assert updated_file.tag == "some updated tag" + assert updated_file.filename == "some updated filename.json" + assert updated_file.contents == "test123test" + assert updated_file.environment_slug == "env234" + + assert updated_file.path == + "/environments/env234/files/some-updated-tag/some-updated-filename.json" + end + + test "update_file/2 with invalid data returns error changeset" do + file = insert(:file) + assert {:error, %Ecto.Changeset{}} = FileRepo.update_file(file, @invalid_attrs) + end + + test "delete_file/1 deletes the file" do + file = insert(:file) + assert {:ok, %File{}} = FileRepo.delete_file(file) + assert_raise Ecto.NoResultsError, fn -> FileRepo.get_file!(file.slug) end + end +end diff --git a/src/core/dbt-api/lib/jade/job_ids/job_id.ex b/src/core/dbt-api/lib/jade/job_ids/job_id.ex new file mode 100644 index 00000000..4e07d58f --- /dev/null +++ b/src/core/dbt-api/lib/jade/job_ids/job_id.ex @@ -0,0 +1,23 @@ +defmodule Jade.JobIds.JobId do + @moduledoc """ + This schema serves as a mapper from Airflow string-based IDs to an integer ID + that we define ourselves. This way, we can query for Airflow DAGs using integer + IDs like we already do with DAGRuns/JobRuns, Projects, and Accounts as well. + + Each record keeps a map of Airflow ID + Datacoves Environment ID -> Generated Integer ID, + the primary key of the record. If an Airflow ID changes, a new record will + be created. + """ + use Jade, :schema + + schema "job_ids" do + field :environment_id, :integer + field :dag_id, :string + end + + def changeset(record, attrs) do + record + |> cast(attrs, [:environment_id, :dag_id]) + |> validate_required([:environment_id, :dag_id]) + end +end diff --git a/src/core/dbt-api/lib/jade/job_ids/job_id_repo.ex b/src/core/dbt-api/lib/jade/job_ids/job_id_repo.ex new file mode 100644 index 00000000..8f838cac --- /dev/null +++ b/src/core/dbt-api/lib/jade/job_ids/job_id_repo.ex @@ -0,0 +1,28 @@ +defmodule Jade.JobIds.JobIdRepo do + use Jade, :repository + + alias Jade.JobIds.JobId + + def create(attrs) do + %JobId{} + |> JobId.changeset(attrs) + |> Repo.insert() + end + + def list() do + Repo.all(JobId) + end + + def get_by(attrs) do + JobId + |> Repo.get_by(attrs) + |> Repo.normalize_one() + end + + def get_or_create_by(attrs) do + case get_by(attrs) do + {:ok, record} -> {:ok, record} + _error -> attrs |> Map.new() |> create() + end + end +end diff --git a/src/core/dbt-api/lib/jade/job_ids/job_id_repo_test.exs b/src/core/dbt-api/lib/jade/job_ids/job_id_repo_test.exs new file mode 100644 index 00000000..8576b45e --- /dev/null +++ b/src/core/dbt-api/lib/jade/job_ids/job_id_repo_test.exs @@ -0,0 +1,41 @@ +defmodule Jade.JobIds.JobIdRepoTest do + use Jade.DataCase, async: true + + alias Jade.JobIds.JobIdRepo + + describe "get_by/1" do + test "returns a record for an job_id" do + job_id = insert(:job_id) + + {:ok, result} = + JobIdRepo.get_by(environment_id: job_id.environment_id, dag_id: job_id.dag_id) + + assert result.id == job_id.id + end + + test "returns an error if no job_id exists" do + {:error, :not_found} = JobIdRepo.get_by(environment_id: 1, dag_id: "foobar") + end + end + + describe "get_or_create_by/1" do + test "returns an existing job_id" do + job_id = insert(:job_id) + + {:ok, result} = + JobIdRepo.get_or_create_by(environment_id: job_id.environment_id, dag_id: job_id.dag_id) + + assert result.id == job_id.id + + {:ok, result} = JobIdRepo.get_or_create_by(id: job_id.id) + assert result.id == job_id.id + end + + test "creates a new 
job_id if it doesn't exist" do + {:ok, result} = JobIdRepo.get_or_create_by(environment_id: 1, dag_id: "foobar") + assert result.id + assert result.dag_id == "foobar" + assert result.environment_id == 1 + end + end +end diff --git a/src/core/dbt-api/lib/jade/job_run_ids/job_run_id.ex b/src/core/dbt-api/lib/jade/job_run_ids/job_run_id.ex new file mode 100644 index 00000000..0d16580b --- /dev/null +++ b/src/core/dbt-api/lib/jade/job_run_ids/job_run_id.ex @@ -0,0 +1,28 @@ +defmodule Jade.JobRunIds.JobRunId do + @moduledoc """ + This schema serves as a mapper from a composite primary key of + Datacoves Environment ID and Airflow DagRun ID to a globally unique integer, + the primary key of this schema. + + The problem this mapper solves is that we might have multiple Airflow Databases + which might have the same integer ID for different DagRuns. For example, + two Airflow Databases might have a DagRun with ID: 1. This mapper stores + the Datacoves Environment for the Airflow Postgres and Airflow's DagRun ID so + that we can fetch the DagRun from the correct Environment/Airflow database if + somebody requests it. + + This mapper is created when we list all DagRuns of an environment/airflow database. + """ + use Jade, :schema + + schema "job_run_ids" do + field :environment_id, :integer + field :dag_run_id, :integer + end + + def changeset(record, attrs) do + record + |> cast(attrs, [:environment_id, :dag_run_id]) + |> validate_required([:environment_id, :dag_run_id]) + end +end diff --git a/src/core/dbt-api/lib/jade/job_run_ids/job_run_id_repo.ex b/src/core/dbt-api/lib/jade/job_run_ids/job_run_id_repo.ex new file mode 100644 index 00000000..a5992215 --- /dev/null +++ b/src/core/dbt-api/lib/jade/job_run_ids/job_run_id_repo.ex @@ -0,0 +1,28 @@ +defmodule Jade.JobRunIds.JobRunIdRepo do + use Jade, :repository + + alias Jade.JobRunIds.JobRunId + + def create(attrs) do + %JobRunId{} + |> JobRunId.changeset(attrs) + |> Repo.insert() + end + + def list() do + Repo.all(JobRunId) + end + + def get_by(attrs) do + JobRunId + |> Repo.get_by(attrs) + |> Repo.normalize_one() + end + + def get_or_create_by(attrs) do + case get_by(attrs) do + {:ok, record} -> {:ok, record} + _error -> attrs |> Map.new() |> create() + end + end +end diff --git a/src/core/dbt-api/lib/jade/job_run_ids/job_run_id_repo_test.exs b/src/core/dbt-api/lib/jade/job_run_ids/job_run_id_repo_test.exs new file mode 100644 index 00000000..7c5eebad --- /dev/null +++ b/src/core/dbt-api/lib/jade/job_run_ids/job_run_id_repo_test.exs @@ -0,0 +1,44 @@ +defmodule Jade.JobRunIds.JobRunIdRepoTest do + use Jade.DataCase, async: true + + alias Jade.JobRunIds.JobRunIdRepo + + describe "get_by/1" do + test "returns a record for a composite key" do + job_run_id = insert(:job_run_id) + + {:ok, result} = + JobRunIdRepo.get_by( + dag_run_id: job_run_id.dag_run_id, + environment_id: job_run_id.environment_id + ) + + assert result.id == job_run_id.id + end + + test "returns an error if no job_run_id exists" do + {:error, :not_found} = JobRunIdRepo.get_by(dag_run_id: 1, environment_id: 1) + end + end + + describe "get_or_create_by/1" do + test "returns an existing job_run_id" do + job_run_id = insert(:job_run_id) + + {:ok, result} = + JobRunIdRepo.get_or_create_by( + dag_run_id: job_run_id.dag_run_id, + environment_id: job_run_id.environment_id + ) + + assert result.id == job_run_id.id + end + + test "creates a new job_run_id if it doesn't exist" do + {:ok, result} = JobRunIdRepo.get_or_create_by(dag_run_id: 1, environment_id: 2) + assert result.id + assert 
result.dag_run_id == 1 + assert result.environment_id == 2 + end + end +end diff --git a/src/core/dbt-api/lib/jade/job_runs/job_run.ex b/src/core/dbt-api/lib/jade/job_runs/job_run.ex new file mode 100644 index 00000000..4878561d --- /dev/null +++ b/src/core/dbt-api/lib/jade/job_runs/job_run.ex @@ -0,0 +1,67 @@ +defmodule Jade.JobRuns.JobRun do + @moduledoc """ + The dbt Cloud Job Schema. Its data comes from Airflow DAGRuns. + """ + use Jade, :schema + + alias Jade.Jobs.Job + + embedded_schema do + # BigEye internal field. Same as `:id` + field :dbt_job_run_ext_id, :integer + belongs_to :job, Job + field :dag_id, :string + field :dag_run_id, :integer + field :dag_run_run_id, :string + + field :trigger_id, :integer + # has_one :trigger, NotImplemented + + field :environment_id, :integer + field :environment_slug, :string + # has_many :run_steps, NotImplemented + field :account_id, :integer + field :completed_at, :utc_datetime + field :project_id, :integer + field :job_definition_id, :integer + field :status, :integer + field :dbt_version, :string, default: "1.6.0-latest" + field :git_branch, :string + field :git_sha, :string + field :status_message, :string + field :owner_thread_id, :string + field :executed_by_thread_id, :string + field :deferring_run_id, :integer + field :artifacts_saved, :boolean, default: false + field :artifact_s3_path, :string + field :has_docs_generated, :boolean, default: false + field :has_sources_generated, :boolean, default: false + field :notifications_sent, :boolean, default: false + field :blocked_by, {:array, :integer} + field :scribe_enabled, :boolean, default: false + field :created_at, :utc_datetime + field :updated_at, :utc_datetime + field :queued_at, :utc_datetime + field :dequeued_at, :utc_datetime + field :started_at, :utc_datetime + field :finished_at, :utc_datetime + field :last_checked_at, :utc_datetime + field :last_heartbeat_at, :utc_datetime + field :should_start_at, :utc_datetime + field :status_humanized, :string + field :in_progress, :boolean + field :is_complete, :boolean + field :is_success, :boolean + field :is_error, :boolean + field :is_cancelled, :boolean + field :is_running, :boolean + field :duration, :string + field :queued_duration, :string + field :run_duration, :string + field :duration_humanized, :string + field :queued_duration_humanized, :string + field :run_duration_humanized, :string + field :created_at_humanized, :string + field :finished_at_humanized, :string + end +end diff --git a/src/core/dbt-api/lib/jade/job_runs/job_run_repo.ex b/src/core/dbt-api/lib/jade/job_runs/job_run_repo.ex new file mode 100644 index 00000000..7135f659 --- /dev/null +++ b/src/core/dbt-api/lib/jade/job_runs/job_run_repo.ex @@ -0,0 +1,178 @@ +defmodule Jade.JobRuns.JobRunRepo do + @moduledoc """ + The Repository for JobRuns. 
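JobRuns are not persisted in the Jade database; `list/1` and `get/2` convert
Airflow DagRuns on the fly and give them stable integer IDs through
`Jade.JobRunIds.JobRunIdRepo`. An illustrative sketch only (the account ID
and job run ID below are made-up placeholders, not values from this repo):

    alias Jade.JobRuns.JobRunRepo

    # Fetch a single JobRun by its generated integer ID, scoped to an account.
    case JobRunRepo.get(1, 42) do
      {:ok, %Jade.JobRuns.JobRun{} = job_run} -> job_run
      {:error, :not_found} -> nil
    end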
+ """ + + alias Airflow.DagRuns.DagRun + alias Airflow.DagRuns.DagRunRepo + + alias Jade.JobIds.JobIdRepo + alias Jade.JobRunIds.JobRunIdRepo + alias Jade.Environments.Environment + alias Jade.Environments.EnvironmentRepo + alias Jade.JobRuns.JobRun + alias Jade.Jobs.Job + + @spec list(map()) :: list(JobRun.t()) + def list(params) do + environments = EnvironmentRepo.list(params) + + Enum.reduce(environments, [], fn environment, job_runs -> + result = DagRunRepo.list(environment) |> convert(environment) + job_runs ++ result + end) + end + + @spec get(integer(), integer()) :: {:ok, JobRun.t()} | {:error, :not_found} + def get(account_id, job_run_id) do + with {:ok, job_run_id} <- JobRunIdRepo.get_by(id: job_run_id), + {:ok, environment} <- EnvironmentRepo.get(account_id, job_run_id.environment_id), + {:ok, dag_run} <- DagRunRepo.get(environment, job_run_id.dag_run_id) do + {:ok, convert(dag_run, environment)} + end + end + + def get_by(environment, attrs) do + with {:ok, dag_run} <- DagRunRepo.get_by(environment, attrs) do + {:ok, convert(dag_run, environment)} + end + end + + @spec get_most_recent_for_job(Environment.t(), Job.t()) :: + {:ok, JobRun.t()} | {:error, :not_found} + def get_most_recent_for_job(environment, job) do + with {:ok, dag_run} <- DagRunRepo.get_most_recent_for_dag(environment, job.dag_id) do + {:ok, convert(dag_run, environment)} + end + end + + @spec get_most_recent_completed_for_job(Environment.t(), Job.t()) :: + {:ok, JobRun.t()} | {:error, :not_found} + def get_most_recent_completed_for_job(environment, job) do + with {:ok, dag_run} <- DagRunRepo.get_most_recent_for_dag(environment, job.dag_id, :success) do + {:ok, convert(dag_run, environment)} + end + end + + @spec get_latest_for_project(map()) :: {:ok, JobRun.t()} | {:error, :not_found} + def get_latest_for_project(params) do + environments = EnvironmentRepo.list(params) + + dag_runs = + Enum.reduce(environments, [], fn environment, dag_runs -> + case DagRunRepo.get_most_recent(environment, :success) do + {:ok, dag_run} -> + [{dag_run, environment} | dag_runs] + + _not_found -> + dag_runs + end + end) + + case dag_runs do + [] -> + {:error, :not_found} + + dag_runs -> + [{latest_dag_run, environment} | _rest] = + Enum.sort_by( + dag_runs, + fn {dag_run, _environment} -> dag_run.end_date end, + {:desc, DateTime} + ) + + {:ok, convert(latest_dag_run, environment)} + end + end + + defp convert(job_runs, environment) when is_list(job_runs) do + Enum.map(job_runs, &convert(&1, environment)) + end + + defp convert(%DagRun{} = dag_run, environment) do + {:ok, job_run_id} = + JobRunIdRepo.get_or_create_by(environment_id: environment.id, dag_run_id: dag_run.id) + + {:ok, job_id} = + JobIdRepo.get_or_create_by(environment_id: environment.id, dag_id: dag_run.dag_id) + + status = convert_state(dag_run.state) + + %JobRun{ + id: job_run_id.id, + job_id: job_id.id, + dag_id: dag_run.dag_id, + dag_run_id: dag_run.id, + dag_run_run_id: dag_run.run_id, + # Fields needed by BigEye + status: status, + dbt_job_run_ext_id: dag_run.id, + started_at: dag_run.start_date, + completed_at: dag_run.end_date, + git_sha: nil, + # ---------------------- + # TODO: Get these fields once we can connect the Environment to DAGs + trigger_id: nil, + environment_id: environment.id, + environment_slug: environment.slug, + account_id: environment.account_id, + project_id: environment.project_id, + git_branch: nil, + # ------------------ + job_definition_id: dag_run.dag_id, + status_message: nil, + owner_thread_id: nil, + executed_by_thread_id: nil, + 
deferring_run_id: nil, + # TODO: Parse it from the manifest.json + dbt_version: nil, + # TODO: Set this to true once we uploaded the manifest to S3 + artifacts_saved: false, + artifact_s3_path: nil, + # ---------------------- + has_docs_generated: false, + has_sources_generated: false, + notifications_sent: false, + blocked_by: nil, + scribe_enabled: nil, + created_at: dag_run.queued_at, + updated_at: dag_run.queued_at, + queued_at: dag_run.queued_at, + dequeued_at: dag_run.end_date, + finished_at: dag_run.end_date, + last_checked_at: nil, + last_heartbeat_at: nil, + should_start_at: dag_run.queued_at, + status_humanized: nil, + in_progress: dag_run.state == :running, + is_complete: dag_run.state in [:success, :failed], + is_success: dag_run.state == :success, + is_error: dag_run.state == :failed, + is_cancelled: nil, + is_running: dag_run.state == :running, + duration: calc_duration(dag_run.queued_at, dag_run.end_date), + queued_duration: calc_duration(dag_run.queued_at, dag_run.start_date), + run_duration: calc_duration(dag_run.start_date, dag_run.end_date), + duration_humanized: nil, + queued_duration_humanized: nil, + run_duration_humanized: nil, + created_at_humanized: nil, + finished_at_humanized: nil + } + end + + defp convert_state(dag_run_state) do + case dag_run_state do + :success -> 1 + :failed -> 2 + :queued -> 3 + :running -> 5 + end + end + + defp calc_duration(%DateTime{} = start_time, %DateTime{} = end_time) do + DateTime.diff(end_time, start_time) + end + + defp calc_duration(_start_time, _end_time), do: nil +end diff --git a/src/core/dbt-api/lib/jade/jobs/job.ex b/src/core/dbt-api/lib/jade/jobs/job.ex new file mode 100644 index 00000000..68b321f2 --- /dev/null +++ b/src/core/dbt-api/lib/jade/jobs/job.ex @@ -0,0 +1,59 @@ +defmodule Jade.Jobs.Job do + @moduledoc """ + The dbt Cloud Job Schema. Its data comes from Airflow DAGs with the exception + of its integer ID, which we generate as an Jade.JobIds.JobId. The JobId keeps + the mapping of the generated integer Id to Airflow's string-based ID. + """ + use Jade, :schema + + alias Jade.JobRuns.JobRun + + @valid_triggers [:github_webhook, :schedule, :git_provider_webhook] + + embedded_schema do + field :project_id, :integer + # BigEye internal field. Same as `:id` + field :dbt_job_ext_id, :integer + # has_one project, NotImplemented + + field :environment_id, :integer + # has_one :environment, NotImplemented + + field :dag_id, :string + field :deferring_job_definition_id, :integer + field :deferring_environment_id, :integer + field :lifecycle_webhooks, :boolean + field :lifecycle_webhooks_url, :string + + field :account_id, :integer + # has_one :account, NotImplemented + + field :name, :string + field :description, :string + field :dbt_version, :string + field :raw_dbt_version, :string + field :triggers, Ecto.Enum, values: @valid_triggers + field :created_at, :utc_datetime + field :updated_at, :utc_datetime + field :schedule, :string + field :settings, :map + field :execution, :map + # Valid values not specified in docs. + field :state, :integer + field :generate_docs, :boolean + field :run_generate_sources, :boolean + field :most_recent_completed_run, :integer + field :most_recent_run, :integer + field :is_deferrable, :boolean + field :deactivated, :boolean + field :run_failure_count, :integer + # Valid values not specified in docs. 
+ field :job_type, :string + field :triggers_on_draft_pr, :boolean + + has_one :most_recent_job_run, JobRun + has_one :most_recent_completed_job_run, JobRun + end + + def valid_triggers(), do: @valid_triggers +end diff --git a/src/core/dbt-api/lib/jade/jobs/job_repo.ex b/src/core/dbt-api/lib/jade/jobs/job_repo.ex new file mode 100644 index 00000000..b41c0d5f --- /dev/null +++ b/src/core/dbt-api/lib/jade/jobs/job_repo.ex @@ -0,0 +1,97 @@ +defmodule Jade.Jobs.JobRepo do + @moduledoc """ + The Repository for Jobs. + """ + + alias Airflow.Dags.Dag + alias Airflow.Dags.DagRepo + + alias Jade.JobIds.JobIdRepo + alias Jade.Environments.EnvironmentRepo + alias Jade.JobRuns.JobRunRepo + alias Jade.Jobs.Job + + @spec list(map()) :: list(Job.t()) + def list(params) do + environments = EnvironmentRepo.list(params) + + Enum.reduce(environments, [], fn environment, jobs -> + result = DagRepo.list(environment, params) |> convert(environment) + jobs ++ result + end) + end + + @spec get(integer(), integer()) :: {:ok, Job.t()} | {:error, :not_found} + def get(account_id, job_id) do + with {:ok, job_id} <- JobIdRepo.get_by(id: job_id), + {:ok, environment} <- EnvironmentRepo.get(account_id, job_id.environment_id), + {:ok, dag} <- DagRepo.get(environment, job_id.dag_id), + job <- convert(dag, environment), + job <- add_most_recent_job_run(environment, job), + job <- add_most_recent_completed_job_run(environment, job) do + {:ok, job} + end + end + + defp add_most_recent_job_run(environment, job) do + case JobRunRepo.get_most_recent_for_job(environment, job) do + {:ok, job_run} -> Map.merge(job, %{most_recent_job_run: job_run}) + _error -> job + end + end + + defp add_most_recent_completed_job_run(environment, job) do + case JobRunRepo.get_most_recent_completed_for_job(environment, job) do + {:ok, job_run} -> Map.merge(job, %{most_recent_completed_job_run: job_run}) + _error -> job + end + end + + defp convert(dags, environment) when is_list(dags) do + Enum.map(dags, &convert(&1, environment)) + end + + defp convert(%Dag{} = dag, environment) do + {:ok, job_id} = JobIdRepo.get_or_create_by(environment_id: environment.id, dag_id: dag.dag_id) + state = if dag.is_active, do: 1, else: 0 + + %Job{ + id: job_id.id, + # Fields needed by BigEye + project_id: environment.project_id, + name: dag.dag_id, + dbt_job_ext_id: job_id.id, + # ----------------------- + environment_id: environment.id, + dag_id: dag.dag_id, + deferring_job_definition_id: nil, + deferring_environment_id: nil, + lifecycle_webhooks: nil, + lifecycle_webhooks_url: nil, + account_id: environment.account_id, + description: dag.description, + # TODO: Parse this from the Manifest once we have it. + dbt_version: nil, + # TODO: Parse this from the Manifest once we have it. 
+ raw_dbt_version: nil, + triggers: nil, + created_at: dag.last_parsed_time, + updated_at: dag.last_parsed_time, + schedule: dag.schedule_interval, + settings: nil, + execution: nil, + state: state, + generate_docs: nil, + run_generate_sources: nil, + # TODO: Preload most recent runs + most_recent_completed_run: nil, + most_recent_run: nil, + is_deferrable: nil, + deactivated: !dag.is_active, + # TODO: Load failed run count + run_failure_count: nil, + job_type: nil, + triggers_on_draft_pr: false + } + end +end diff --git a/src/core/dbt-api/lib/jade/manifests/cleaner.ex b/src/core/dbt-api/lib/jade/manifests/cleaner.ex new file mode 100644 index 00000000..faea9692 --- /dev/null +++ b/src/core/dbt-api/lib/jade/manifests/cleaner.ex @@ -0,0 +1,36 @@ +defmodule Jade.Manifests.Cleaner do + @moduledoc """ + A background job that deletes old manifest contents from the database. + It only keeps the manifest contents of the last successful JobRun per DAG. + """ + use GenServer + + require Logger + + alias Jade.Manifests.ManifestRepo + + # Run every minute + @interval :timer.minutes(1) + + def start_link(init_args) do + GenServer.start_link(__MODULE__, [init_args]) + end + + def init(_args) do + schedule_run() + {:ok, :initial_state} + end + + defp schedule_run() do + Process.send_after(self(), :clean, @interval) + end + + def handle_info(:clean, _state) do + schedule_run() + + count = ManifestRepo.delete_old_manifest_contents() + Logger.debug("Deleted #{count} old manifest contents") + + {:noreply, :cleaned} + end +end diff --git a/src/core/dbt-api/lib/jade/manifests/manifest.ex b/src/core/dbt-api/lib/jade/manifests/manifest.ex new file mode 100644 index 00000000..d4f37f58 --- /dev/null +++ b/src/core/dbt-api/lib/jade/manifests/manifest.ex @@ -0,0 +1,54 @@ +defmodule Jade.Manifests.Manifest do + use Jade, :schema + + schema "manifests" do + field :slug, :binary_id + field :tag, :string + field :account_id, :integer + field :project_id, :integer + field :environment_slug, :string + # This is the DAG ID we get from Airflow (e.g. "yaml_sample_dag") + field :dag_id, :string + # This is the internal, integer ID for the DAGRun (e.g. 1) + field :dag_run_id, :integer + # This is the DAGRun ID we get from Airflow (e.g. 
"manual__2023-12-02T09:49:46.105347+00:00") + field :dag_run_run_id, :string + field :content, :map, load_in_query: false + + belongs_to :job_run, Jade.JobRunIds.JobRunId, foreign_key: :job_run_id + + timestamps(type: :utc_datetime) + end + + def filepath(%__MODULE__{id: id, job_run_id: nil} = m) when not is_nil(id) do + "accounts/#{m.account_id}/environments/#{m.environment_slug}/manifests/#{m.id}/manifest.json" + end + + def filepath(%__MODULE__{dag_run_run_id: dag_run_run_id} = m) when is_binary(dag_run_run_id) do + safe_dag_run_run_id = String.replace(m.dag_run_run_id, ~r/[_:.+]/, "-") + + "accounts/#{m.account_id}/environments/#{m.environment_slug}/dags/#{m.dag_id}/dag_runs/#{safe_dag_run_run_id}/manifest.json" + end + + @doc false + def changeset(manifest, attrs) do + manifest + |> cast(attrs, [ + :tag, + :account_id, + :project_id, + :environment_slug, + :dag_id, + :dag_run_id, + :dag_run_run_id, + :job_run_id, + :content + ]) + |> validate_required([ + :account_id, + :project_id, + :environment_slug + ]) + |> unique_constraint([:environment_slug, :tag], error_key: :tag) + end +end diff --git a/src/core/dbt-api/lib/jade/manifests/manifest_repo.ex b/src/core/dbt-api/lib/jade/manifests/manifest_repo.ex new file mode 100644 index 00000000..85f7ae8b --- /dev/null +++ b/src/core/dbt-api/lib/jade/manifests/manifest_repo.ex @@ -0,0 +1,172 @@ +defmodule Jade.Manifests.ManifestRepo do + @moduledoc """ + The Repository for Manifests. + """ + use Jade, :repository + + alias Jade.Environments.Environment + alias Jade.Environments.EnvironmentRepo + alias Jade.JobRuns.JobRunRepo + alias Jade.Manifests.Manifest + alias Jade.Projects.Project + + def create(environment_slug, dag_id, run_id, content, tag \\ nil) do + with {:ok, environment} <- EnvironmentRepo.get_by_slug(environment_slug), + {:ok, job_run} <- JobRunRepo.get_by(environment, dag_id: dag_id, run_id: run_id), + {:ok, manifest} <- + do_create(%{environment: environment, job_run: job_run, tag: tag, content: content}) do + {:ok, manifest} + end + end + + def create(environment_slug, content, tag \\ nil) do + with {:ok, environment} <- EnvironmentRepo.get_by_slug(environment_slug), + {:ok, manifest} <- + do_create(%{environment: environment, job_run: nil, tag: tag, content: content}) do + {:ok, manifest} + end + end + + @spec get(integer()) :: {:ok, Manifest.t()} | {:error, :not_found} + def get(id) do + Manifest + |> Repo.get(id) + |> Repo.normalize_one() + end + + @spec get_by(list()) :: {:ok, Manifest.t()} | {:error, :not_found} + def get_by(attrs) do + Manifest + |> Repo.get_by(attrs) + |> Repo.normalize_one() + end + + @spec get_latest_for_environment(Environment.t()) :: {:ok, Manifest.t()} | {:error, :not_found} + def get_latest_for_environment(%Environment{} = environment) do + Manifest + |> where(environment_slug: ^environment.slug) + |> order_by(desc: :inserted_at) + |> limit(1) + |> Repo.one() + |> Repo.normalize_one() + end + + @spec get_latest_for_project(Project.t()) :: {:ok, Manifest.t()} | {:error, :not_found} + def get_latest_for_project(%Project{} = project) do + Manifest + |> where(project_id: ^project.id) + |> order_by(desc: :inserted_at) + |> limit(1) + |> Repo.one() + |> Repo.normalize_one() + end + + @spec get_full_content(manifest :: Manifest.t()) :: map() + def get_full_content(%Manifest{} = manifest) do + Manifest + |> where(id: ^manifest.id) + |> select([:content]) + |> Repo.one() + |> Map.get(:content) + end + + @spec get_minimal_content(manifest :: struct()) :: map() + def get_minimal_content(%Manifest{} = 
manifest) do + base_query = from(m in Manifest, where: m.id == ^manifest.id) + + partial_query = + from(m in base_query, + select: %{ + # "id" => m.id, + # "account_id" => m.account_id, + # "project_id" => m.project_id, + # "environment_slug" => m.environment_slug, + # "dag_id" => m.dag_id, + # "dag_run_id" => m.dag_run_run_id, + "metadata" => m.content["metadata"], + "nodes" => m.content["nodes"] + } + ) + + Repo.one(partial_query) + end + + @spec upload_file(Manifest.t(), binary()) :: :ok | {:error, any()} + def upload_file(%Manifest{} = manifest, content) do + filepath = Manifest.filepath(manifest) + Jade.Storage.upload(filepath, content) + end + + @spec download_file(Manifest.t()) :: {:ok, binary()} | {:error, any()} + def download_file(%Manifest{} = manifest) do + filepath = Manifest.filepath(manifest) + Jade.Storage.download(filepath) + end + + @doc """ + Deletes the contents of all manifests except the latest one per DAG. + """ + def delete_old_manifest_contents() do + latest_manifests = + from(m in Manifest, + group_by: [m.dag_id, m.id], + order_by: [desc: m.inserted_at], + distinct: m.dag_id, + select: %{id: m.id, dag_id: m.dag_id} + ) + |> Repo.all() + + now = DateTime.utc_now() + + Enum.reduce(latest_manifests, 0, fn %{id: id, dag_id: dag_id}, counter -> + query = + from(m in Manifest, where: m.dag_id == ^dag_id and m.id != ^id and not is_nil(m.content)) + + {count, nil} = Repo.update_all(query, set: [content: nil, updated_at: now]) + counter + count + end) + end + + defp do_create(%{content: content} = attrs) do + Repo.transact(fn -> + with {:ok, manifest} <- create_manifest(attrs), + :ok <- upload_file(manifest, content) do + {:ok, manifest} + end + end) + end + + def create_manifest(%{environment: environment, job_run: nil, tag: tag, content: content} = _attrs) do + attrs = %{ + account_id: environment.account_id, + project_id: environment.project_id, + environment_slug: environment.slug, + tag: tag, + content: Jason.decode!(content) + } + + %Manifest{} + |> Manifest.changeset(attrs) + # Return the struct to fetch the autogenerated slug. + |> Repo.insert(returning: true) + end + + def create_manifest(%{environment: environment, job_run: job_run, tag: tag, content: content} = _attrs) do + attrs = %{ + account_id: environment.account_id, + project_id: environment.project_id, + environment_slug: environment.slug, + dag_id: job_run.dag_id, + dag_run_id: job_run.dag_run_id, + dag_run_run_id: job_run.dag_run_run_id, + job_run_id: job_run.id, + tag: tag, + content: Jason.decode!(content) + } + + %Manifest{} + |> Manifest.changeset(attrs) + # Return the struct to fetch the autogenerated slug. + |> Repo.insert(returning: true) + end +end diff --git a/src/core/dbt-api/lib/jade/manifests/manifest_repo_test.exs b/src/core/dbt-api/lib/jade/manifests/manifest_repo_test.exs new file mode 100644 index 00000000..73e1e74b --- /dev/null +++ b/src/core/dbt-api/lib/jade/manifests/manifest_repo_test.exs @@ -0,0 +1,177 @@ +defmodule Jade.Manifests.ManifestRepoTest do + use Jade.DataCase, async: false + + alias Jade.JobRunIds.JobRunIdRepo + alias Jade.Manifests.ManifestRepo + + @content File.read!("./test/support/fixtures/manifest.json") + + describe "create/1" do + test "creates a new manifest" do + attrs = insert_two_accounts_with_repos() + + # Another DagRun with the same run_id but different dag_id to test that we select the correct DagRun. 
+ insert(:dag_run, repo: attrs.repo_1, run_id: "manual__2023-12-02T09:49:46.105347+00:00") + + dag_run = + insert(:dag_run, repo: attrs.repo_1, run_id: "manual__2023-12-02T09:49:46.105347+00:00") + + {:ok, res_manifest} = + ManifestRepo.create(attrs.environment_1.slug, dag_run.dag_id, dag_run.run_id, @content) + + {:ok, res_manifest} = ManifestRepo.get(res_manifest.id) + assert res_manifest.account_id == attrs.account_1.id + assert res_manifest.environment_slug == attrs.environment_1.slug + assert res_manifest.dag_id == dag_run.dag_id + assert res_manifest.dag_run_id == dag_run.id + assert res_manifest.dag_run_run_id == dag_run.run_id + + assert %{"metadata" => %{"dbt_version" => "1.6.9"}} = + ManifestRepo.get_full_content(res_manifest) + + {:ok, job_run_id} = JobRunIdRepo.get_by(id: res_manifest.job_run_id) + assert job_run_id.environment_id == attrs.environment_1.id + assert job_run_id.dag_run_id == dag_run.id + end + end + + describe "get_by/1" do + test "returns a manifest for a given job_run_id" do + manifest = insert(:manifest) + {:ok, res_manifest} = ManifestRepo.get_by(job_run_id: manifest.job_run_id) + assert res_manifest.id == manifest.id + end + + test "returns an error if no manifest exists for the given job_run_id" do + {:error, :not_found} = ManifestRepo.get_by(job_run_id: 1) + end + end + + describe "get/1" do + test "returns a manifest for a given id" do + manifest = insert(:manifest) + {:ok, res_manifest} = ManifestRepo.get(manifest.id) + assert res_manifest.id == manifest.id + end + + test "returns an error if no manifest exists for the given id" do + {:error, :not_found} = ManifestRepo.get(1) + end + end + + describe "get_minimal_content/1" do + test "returns the trimmed content of a manifest" do + content = Jason.decode!(@content) + manifest = insert(:manifest, content: content) + + result = ManifestRepo.get_minimal_content(manifest) + + assert %{ + # "exposures" => %{ + # "exposure.balboa.customer_loans" => %{ + # "fqn" => ["balboa", "L4_exposures", "customer_loans"] + # # removed other fields for brevity + # }, + # "exposure.balboa.loans_analysis" => %{ + # "fqn" => ["balboa", "L4_exposures", "loans_analysis"] + # } + # }, + # "group_map" => %{}, + # "groups" => %{}, + "metadata" => %{ + "generated_at" => "2024-01-18T14:38:12.611300Z", + "project_name" => "balboa" + # removed other fields for brevity + }, + "nodes" => %{ + "model.balboa.base_cases" => %{ + "database" => "BALBOA_DEV", + "fqn" => ["balboa", "L2_bays", "covid_observations", "base_cases"], + "schema" => "gomezn" + }, + "model.balboa.country_populations" => %{ + "database" => "BALBOA_DEV", + "fqn" => ["balboa", "L1_inlets", "country_data", "country_populations"], + "schema" => "gomezn" + } + } + } = result + end + + test "returns an empty map if a manifest has no content" do + manifest = insert(:manifest, content: nil) + + result = ManifestRepo.get_minimal_content(manifest) + + assert %{ + # "exposures" => nil, + # "group_map" => nil, + # "groups" => nil, + "metadata" => nil, + "nodes" => nil + } = result + end + end + + describe "delete_old_manifest_contents" do + test "deletes old manifest contents" do + content = Jason.decode!(@content) + + manifest_1 = + insert(:manifest, + dag_id: "yaml_example_dag", + content: content, + inserted_at: ~U[2024-01-01 10:00:00Z] + ) + + manifest_2 = + insert(:manifest, + dag_id: "yaml_example_dag", + content: content, + inserted_at: ~U[2024-01-01 11:00:00Z] + ) + + manifest_3 = + insert(:manifest, + dag_id: "yaml_example_dag", + content: content, + inserted_at: 
~U[2024-01-01 12:00:00Z] + ) + + manifest_4 = + insert(:manifest, + dag_id: "another_dag", + content: content, + inserted_at: ~U[2024-01-01 13:00:00Z] + ) + + manifest_5 = + insert(:manifest, + dag_id: "another_dag", + content: content, + inserted_at: ~U[2024-01-01 14:00:00Z] + ) + + manifest_6 = + insert(:manifest, + dag_id: "dag_with_only_one_manifest", + content: content, + inserted_at: ~U[2024-01-01 15:00:00Z] + ) + + ManifestRepo.delete_old_manifest_contents() + + # The first two manifest contents should have been deleted + assert ManifestRepo.get_full_content(manifest_1) == nil + assert ManifestRepo.get_full_content(manifest_2) == nil + assert ManifestRepo.get_full_content(manifest_3) == content + + # The first manifest content should have been deleted + assert ManifestRepo.get_full_content(manifest_4) == nil + assert ManifestRepo.get_full_content(manifest_5) == content + + # The manifest content should not have been deleted + assert ManifestRepo.get_full_content(manifest_6) == content + end + end +end diff --git a/src/core/dbt-api/lib/jade/projects/project.ex b/src/core/dbt-api/lib/jade/projects/project.ex new file mode 100644 index 00000000..fa74573f --- /dev/null +++ b/src/core/dbt-api/lib/jade/projects/project.ex @@ -0,0 +1,32 @@ +defmodule Jade.Projects.Project do + use Jade, :schema + + embedded_schema do + field :name, :string + field :slug, :string + field :account_id, :integer + field :connection_id, :integer + # has_one :connection, NotImplemented + field :repository_id, :integer + # has_one :repository, NotImplemented + field :semantic_layer_id, :integer + + # The dbt/Datacoves Project ID + field :integration_entity_id, :integer + + field :skipped_setup, :boolean + field :state, :integer + field :dbt_project_subdirectory, :string + + # has_one :group_permissions, NotImplemented + + field :docs_job_id, :integer + # has_one :docs_job, NotImplemented + + field :freshness_job_id, :integer + # has_one :freshness_job, NotImplemented + + field :created_at, :utc_datetime + field :updated_at, :utc_datetime + end +end diff --git a/src/core/dbt-api/lib/jade/projects/project_repo.ex b/src/core/dbt-api/lib/jade/projects/project_repo.ex new file mode 100644 index 00000000..a9260798 --- /dev/null +++ b/src/core/dbt-api/lib/jade/projects/project_repo.ex @@ -0,0 +1,62 @@ +defmodule Jade.Projects.ProjectRepo do + @moduledoc """ + The Repository for Projects. + + Fetches the Project data from the defined adapter. + The adapter fetches the project data from the Datacoves Postgres + database or mocks them in our tests. We convert the Datacoves + Project schema to the Jade Project schema before returning it. 
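A rough usage sketch (the account and project IDs below are placeholders;
`get_by/1` simply forwards its keyword filter to the underlying Datacoves repo):

    alias Jade.Projects.ProjectRepo

    # Fetch a project scoped to an account; returns a %Jade.Projects.Project{}.
    case ProjectRepo.get(1, 10) do
      {:ok, project} -> project.slug
      {:error, :not_found} -> nil
    end

    # Or look it up by attributes understood by the Datacoves repo.
    ProjectRepo.get_by(id: 10)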
+ """ + + alias Datacoves.Projects.Project, as: DatacovesProject + alias Datacoves.Projects.ProjectRepo + + alias Jade.Projects.Project, as: JadeProject + + @spec list(map()) :: list(JadeProject.t()) + def list(params) do + ProjectRepo.list(params) |> convert() + end + + @spec get(integer(), integer()) :: {:ok, JadeProject.t()} | {:error, :not_found} + def get(account_id, project_id) do + with {:ok, project} <- ProjectRepo.get_by(id: project_id, account_id: account_id) do + {:ok, convert(project)} + end + end + + @spec get_by(keyword()) :: {:ok, JadeProject.t()} | {:error, :not_found} + def get_by(attrs) do + with {:ok, project} <- ProjectRepo.get_by(attrs) do + {:ok, convert(project)} + end + end + + defp convert(projects) when is_list(projects) do + Enum.map(projects, &convert/1) + end + + defp convert(%DatacovesProject{} = project) do + %JadeProject{ + id: project.id, + # Fields needed by BigEye + account_id: project.account_id, + name: project.name, + slug: project.slug, + integration_entity_id: project.id, + # ---------------------- + # TODO: Decide on which Environment of many to use here. + connection_id: nil, + repository_id: project.repository_id, + semantic_layer_id: nil, + skipped_setup: false, + state: nil, + # TODO: Fetch from Environment, but which one? + dbt_project_subdirectory: nil, + docs_job_id: nil, + freshness_job_id: nil, + created_at: project.created_at, + updated_at: project.updated_at + } + end +end diff --git a/src/core/dbt-api/lib/jade/repo.ex b/src/core/dbt-api/lib/jade/repo.ex new file mode 100644 index 00000000..a637b893 --- /dev/null +++ b/src/core/dbt-api/lib/jade/repo.ex @@ -0,0 +1,35 @@ +defmodule Jade.Repo do + use Ecto.Repo, + otp_app: :jade, + adapter: Ecto.Adapters.Postgres + + @doc """ + A small wrapper around `Repo.transaction/2'. + + Commits the transaction if the lambda returns `:ok` or `{:ok, result}`, + rolling it back if the lambda returns `:error` or `{:error, reason}`. In both + cases, the function returns the result of the lambda. + """ + @spec transact((-> any()), keyword()) :: {:ok, any()} | {:error, any()} + def transact(fun, opts \\ []) do + transaction( + fn -> + case fun.() do + {:ok, value} -> value + :ok -> :transaction_commited + {:error, reason} -> rollback(reason) + :error -> rollback(:transaction_rollback_error) + end + end, + opts + ) + end + + def normalize_all([]), do: {:error, :not_found} + def normalize_all(records), do: {:ok, records} + + def normalize_one(nil), do: {:error, :not_found} + def normalize_one([]), do: {:error, :not_found} + def normalize_one([record | _]), do: {:ok, record} + def normalize_one(record), do: {:ok, record} +end diff --git a/src/core/dbt-api/lib/jade/storage/adapter.ex b/src/core/dbt-api/lib/jade/storage/adapter.ex new file mode 100644 index 00000000..9fd4408d --- /dev/null +++ b/src/core/dbt-api/lib/jade/storage/adapter.ex @@ -0,0 +1,14 @@ +defmodule Jade.Storage.Adapter do + @moduledoc """ + The Behaviour for Storage Adapters. 
+ """ + + @type filename :: binary() + @type bucket :: binary() + @type contents :: binary() + @type response :: map() + + @callback upload(bucket, filename, contents) :: {:ok, response} | {:error, any()} + @callback download(bucket, filename) :: {:ok, contents} | {:error, any()} + @callback delete(bucket, filename) :: {:ok, response} | {:error, any()} +end diff --git a/src/core/dbt-api/lib/jade/storage/blob.ex b/src/core/dbt-api/lib/jade/storage/blob.ex new file mode 100644 index 00000000..e21f3bda --- /dev/null +++ b/src/core/dbt-api/lib/jade/storage/blob.ex @@ -0,0 +1,37 @@ +defmodule Jade.Storage.Blob do + @moduledoc """ + The Storage Adapter for Azure Blob, used in production. + """ + + @behaviour Jade.Storage.Adapter + + @impl true + def upload(bucket, filename, contents) do + filename = sanitize_filename(filename) + Azurex.Blob.put_blob(filename, contents, "text/plain", bucket) + end + + @impl true + def download(bucket, filename) do + filename = sanitize_filename(filename) + + with {:ok, contents} <- Azurex.Blob.get_blob(filename, bucket) do + {:ok, %{status_code: 200, body: contents}} + end + end + + @impl true + def delete(bucket, filename) do + filename = sanitize_filename(filename) + + case Azurex.Blob.delete_blob(filename, bucket) do + :ok -> {:ok, %{status_code: 204, body: ""}} + {:error, :not_found} -> {:ok, %{status_code: 204, body: ""}} + error -> error + end + end + + # Azurex.Blob expects a filename without leading "/" + defp sanitize_filename("/" <> filename), do: filename + defp sanitize_filename(filename), do: filename +end diff --git a/src/core/dbt-api/lib/jade/storage/minio.ex b/src/core/dbt-api/lib/jade/storage/minio.ex new file mode 100644 index 00000000..2c75a422 --- /dev/null +++ b/src/core/dbt-api/lib/jade/storage/minio.ex @@ -0,0 +1,54 @@ +defmodule Jade.Storage.Minio do + @moduledoc """ + The Storage Adapter for Minio used in local development. + """ + + @behaviour Jade.Storage.Adapter + @http_adapter Application.compile_env(:jade, :http_adapter) + + @impl true + def upload(bucket, filename, contents) do + with {:ok, url} <- get_upload_url(bucket, filename) do + @http_adapter.put(url, contents) + end + end + + defp get_upload_url(bucket, filename) do + Minio.presign_put_object( + client(), + bucket_name: bucket, + object_name: filename + ) + end + + @impl true + def download(bucket, filename) do + with {:ok, url} <- get_download_url(bucket, filename) do + @http_adapter.get(url) + end + end + + defp get_download_url(bucket, filename) do + Minio.presign_get_object( + client(), + bucket_name: bucket, + object_name: filename + ) + end + + @impl true + def delete(_bucket, _filename) do + # The Minio client does not provide a delete function. + {:ok, %{status_code: 204, body: ""}} + end + + defp client() do + %Minio{ + endpoint: config()[:minio_url], + access_key: config()[:minio_access_key], + secret_key: config()[:minio_secret_key] + } + end + + defp config(), do: Application.get_env(:jade, :storage) +end diff --git a/src/core/dbt-api/lib/jade/storage/s3.ex b/src/core/dbt-api/lib/jade/storage/s3.ex new file mode 100644 index 00000000..47f6e9b3 --- /dev/null +++ b/src/core/dbt-api/lib/jade/storage/s3.ex @@ -0,0 +1,35 @@ +defmodule Jade.Storage.S3 do + @moduledoc """ + The Storage Adapter for S3 used in production. 
+ """ + + @behaviour Jade.Storage.Adapter + @http_adapter Application.compile_env(:jade, :http_adapter) + + @impl true + def upload(bucket, filename, contents) do + with {:ok, url} <- get_presigned_url(bucket, filename, :put) do + @http_adapter.put(url, contents) + end + end + + @impl true + def download(bucket, filename) do + with {:ok, url} <- get_presigned_url(bucket, filename, :get) do + @http_adapter.get(url) + end + end + + @impl true + def delete(bucket, filename) do + with {:ok, url} <- get_presigned_url(bucket, filename, :delete) do + @http_adapter.delete(url) + end + end + + defp get_presigned_url(bucket, filename, http_operation) do + :s3 + |> ExAws.Config.new() + |> ExAws.S3.presigned_url(http_operation, bucket, filename) + end +end diff --git a/src/core/dbt-api/lib/jade/storage/storage.ex b/src/core/dbt-api/lib/jade/storage/storage.ex new file mode 100644 index 00000000..698f5407 --- /dev/null +++ b/src/core/dbt-api/lib/jade/storage/storage.ex @@ -0,0 +1,42 @@ +defmodule Jade.Storage do + @moduledoc """ + The interface for uploading and downloading files from and to Minio or S3. + """ + + require Logger + + def upload(filename, content) do + response = adapter().upload(bucket(), filename, content) + + with {:ok, _body} <- handle_response(response) do + :ok + end + end + + def download(filename) do + response = adapter().download(bucket(), filename) + handle_response(response) + end + + def delete(filename) do + response = adapter().delete(bucket(), filename) + + with {:ok, _body} <- handle_response(response) do + :ok + end + end + + defp handle_response(:ok), do: :ok + defp handle_response({_result, response}), do: handle_response(response) + defp handle_response(%{status_code: 200, body: body}), do: {:ok, body} + defp handle_response(%{status_code: 204, body: body}), do: {:ok, body} + defp handle_response(%{status_code: 404}), do: {:error, :not_found} + + defp handle_response(%{status_code: error_code, body: body}) do + Logger.error("HTTP Request failed - #{error_code} - #{inspect(body)}") + {:error, :http_request_failed} + end + + defp adapter(), do: Application.get_env(:jade, :storage)[:adapter] + defp bucket(), do: Application.get_env(:jade, :storage)[:bucket] +end diff --git a/src/core/dbt-api/lib/jade_web.ex b/src/core/dbt-api/lib/jade_web.ex new file mode 100644 index 00000000..51e5404b --- /dev/null +++ b/src/core/dbt-api/lib/jade_web.ex @@ -0,0 +1,129 @@ +defmodule JadeWeb do + @moduledoc """ + The entrypoint for defining your web interface, such + as controllers, components, channels, and so on. + + This can be used in your application as: + + use JadeWeb, :controller + use JadeWeb, :html + + The definitions below will be executed for every controller, + component, etc, so keep them short and clean, focused + on imports, uses and aliases. + + Do NOT define functions inside the quoted expressions + below. Instead, define additional modules and import + those modules here. 
+ """ + + def static_paths, do: ~w(assets fonts images favicon.ico robots.txt) + + def router do + quote do + use Phoenix.Router, helpers: false + + # Import common connection and controller functions to use in pipelines + import Plug.Conn + import Phoenix.Controller + import Phoenix.LiveView.Router + end + end + + def channel do + quote do + use Phoenix.Channel + end + end + + def controller do + quote do + use Phoenix.Controller, + formats: [:html, :json], + layouts: [html: JadeWeb.Layouts] + + use OpenApiSpex.ControllerSpecs + + alias JadeWeb.OpenApi.Generic + alias JadeWeb.OpenApi.Schemas + alias JadeWeb.OpenApi.Schemas.Pagination + + import Plug.Conn + use Gettext, backend: JadeWeb.Gettext + + action_fallback(JadeWeb.API.FallbackController) + + unquote(verified_routes()) + end + end + + def live_view do + quote do + use Phoenix.LiveView, + layout: {JadeWeb.Layouts, :app} + + unquote(html_helpers()) + end + end + + def live_component do + quote do + use Phoenix.LiveComponent + + unquote(html_helpers()) + end + end + + def html do + quote do + use Phoenix.Component + + # Import convenience functions from controllers + import Phoenix.Controller, + only: [get_csrf_token: 0, view_module: 1, view_template: 1] + + # Include general helpers for rendering HTML + unquote(html_helpers()) + end + end + + defp html_helpers do + quote do + # HTML escaping functionality + import Phoenix.HTML + # Core UI components and translation + import JadeWeb.CoreComponents + use Gettext, backend: JadeWeb.Gettext + + # Shortcut for generating JS commands + alias Phoenix.LiveView.JS + + # Routes generation with the ~p sigil + unquote(verified_routes()) + end + end + + def openapi_schema do + quote do + require OpenApiSpex + alias OpenApiSpex.Schema + alias JadeWeb.OpenApi.Generic + end + end + + def verified_routes do + quote do + use Phoenix.VerifiedRoutes, + endpoint: JadeWeb.Endpoint, + router: JadeWeb.Router, + statics: JadeWeb.static_paths() + end + end + + @doc """ + When used, dispatch to the appropriate controller/view/etc. + """ + defmacro __using__(which) when is_atom(which) do + apply(__MODULE__, which, []) + end +end diff --git a/src/core/dbt-api/lib/jade_web/components/core_components.ex b/src/core/dbt-api/lib/jade_web/components/core_components.ex new file mode 100644 index 00000000..ca04bed5 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/components/core_components.ex @@ -0,0 +1,666 @@ +defmodule JadeWeb.CoreComponents do + @moduledoc """ + Provides core UI components. + + At first glance, this module may seem daunting, but its goal is to provide + core building blocks for your application, such as modals, tables, and + forms. The components consist mostly of markup and are well-documented + with doc strings and declarative assigns. You may customize and style + them in any way you want, based on your application growth and needs. + + The default components use Tailwind CSS, a utility-first CSS framework. + See the [Tailwind CSS documentation](https://tailwindcss.com) to learn + how to customize them or feel free to swap in another framework altogether. + + Icons are provided by [heroicons](https://heroicons.com). See `icon/1` for usage. + """ + use Phoenix.Component + + alias Phoenix.LiveView.JS + use Gettext, backend: JadeWeb.Gettext + + @doc """ + Renders a modal. + + ## Examples + + <.modal id="confirm-modal"> + This is a modal. 
+ + + JS commands may be passed to the `:on_cancel` to configure + the closing/cancel event, for example: + + <.modal id="confirm" on_cancel={JS.navigate(~p"/posts")}> + This is another modal. + + + """ + attr :id, :string, required: true + attr :show, :boolean, default: false + attr :on_cancel, JS, default: %JS{} + slot :inner_block, required: true + + def modal(assigns) do + ~H""" + + """ + end + + def input(%{type: "select"} = assigns) do + ~H""" +
+ <.label for={@id}>{@label} + + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + def input(%{type: "textarea"} = assigns) do + ~H""" +
+ <.label for={@id}>{@label} + + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + # All other inputs text, datetime-local, url, password, etc. are handled here... + def input(assigns) do + ~H""" +
+ <.label for={@id}>{@label} + + <.error :for={msg <- @errors}>{msg} +
+ """ + end + + @doc """ + Renders a label. + """ + attr :for, :string, default: nil + slot :inner_block, required: true + + def label(assigns) do + ~H""" + + """ + end + + @doc """ + Generates a generic error message. + """ + slot :inner_block, required: true + + def error(assigns) do + ~H""" +

+ <.icon name="hero-exclamation-circle-mini" class="mt-0.5 h-5 w-5 flex-none" /> + {render_slot(@inner_block)} +

+ """ + end + + @doc """ + Renders a header with title. + """ + attr :class, :string, default: nil + + slot :inner_block, required: true + slot :subtitle + slot :actions + + def header(assigns) do + ~H""" +
+
+

+ {render_slot(@inner_block)} +

+

+ {render_slot(@subtitle)} +

+
+
{render_slot(@actions)}
+
+ """ + end + + @doc ~S""" + Renders a table with generic styling. + + ## Examples + + <.table id="users" rows={@users}> + <:col :let={user} label="id"><%= user.id %> + <:col :let={user} label="username"><%= user.username %> + + """ + attr :id, :string, required: true + attr :rows, :list, required: true + attr :row_id, :any, default: nil, doc: "the function for generating the row id" + attr :row_click, :any, default: nil, doc: "the function for handling phx-click on each row" + + attr :row_item, :any, + default: &Function.identity/1, + doc: "the function for mapping each row before calling the :col and :action slots" + + slot :col, required: true do + attr :label, :string + end + + slot :action, doc: "the slot for showing user actions in the last table column" + + def table(assigns) do + assigns = + with %{rows: %Phoenix.LiveView.LiveStream{}} <- assigns do + assign(assigns, row_id: assigns.row_id || fn {id, _item} -> id end) + end + + ~H""" +
+ + + + + + + + + + + + + +
{col[:label]} + {gettext("Actions")} +
+
+ + + {render_slot(col, @row_item.(row))} + +
+
+
+ + + {render_slot(action, @row_item.(row))} + +
+
+
+ """ + end + + @doc """ + Renders a data list. + + ## Examples + + <.list> + <:item title="Title"><%= @post.title %> + <:item title="Views"><%= @post.views %> + + """ + slot :item, required: true do + attr :title, :string, required: true + end + + def list(assigns) do + ~H""" +
+
+
+
{item.title}
+
{render_slot(item)}
+
+
+
+ """ + end + + @doc """ + Renders a back navigation link. + + ## Examples + + <.back navigate={~p"/posts"}>Back to posts + """ + attr :navigate, :any, required: true + slot :inner_block, required: true + + def back(assigns) do + ~H""" +
+ <.link navigate={@navigate} class="text-sm font-semibold leading-6 text-zinc-900 hover:text-zinc-700"> + <.icon name="hero-arrow-left-solid" class="h-3 w-3" /> + {render_slot(@inner_block)} + +
+ """ + end + + @doc """ + Renders a [Heroicon](https://heroicons.com). + + Heroicons come in three styles – outline, solid, and mini. + By default, the outline style is used, but solid and mini may + be applied by using the `-solid` and `-mini` suffix. + + You can customize the size and colors of the icons by setting + width, height, and background color classes. + + Icons are extracted from your `assets/vendor/heroicons` directory and bundled + within your compiled app.css by the plugin in your `assets/tailwind.config.js`. + + ## Examples + + <.icon name="hero-x-mark-solid" /> + <.icon name="hero-arrow-path" class="ml-1 w-3 h-3 animate-spin" /> + """ + attr :name, :string, required: true + attr :class, :string, default: nil + + def icon(%{name: "hero-" <> _} = assigns) do + ~H""" + + """ + end + + ## JS Commands + + def show(js \\ %JS{}, selector) do + JS.show(js, + to: selector, + transition: + {"transition-all transform ease-out duration-300", "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95", + "opacity-100 translate-y-0 sm:scale-100"} + ) + end + + def hide(js \\ %JS{}, selector) do + JS.hide(js, + to: selector, + time: 200, + transition: + {"transition-all transform ease-in duration-200", "opacity-100 translate-y-0 sm:scale-100", + "opacity-0 translate-y-4 sm:translate-y-0 sm:scale-95"} + ) + end + + def show_modal(js \\ %JS{}, id) when is_binary(id) do + js + |> JS.show(to: "##{id}") + |> JS.show( + to: "##{id}-bg", + transition: {"transition-all transform ease-out duration-300", "opacity-0", "opacity-100"} + ) + |> show("##{id}-container") + |> JS.add_class("overflow-hidden", to: "body") + |> JS.focus_first(to: "##{id}-content") + end + + def hide_modal(js \\ %JS{}, id) do + js + |> JS.hide( + to: "##{id}-bg", + transition: {"transition-all transform ease-in duration-200", "opacity-100", "opacity-0"} + ) + |> hide("##{id}-container") + |> JS.hide(to: "##{id}", transition: {"block", "block", "hidden"}) + |> JS.remove_class("overflow-hidden", to: "body") + |> JS.pop_focus() + end + + @doc """ + Translates an error message using gettext. + """ + def translate_error({msg, opts}) do + # When using gettext, we typically pass the strings we want + # to translate as a static argument: + # + # # Translate the number of files with plural rules + # dngettext("errors", "1 file", "%{count} files", count) + # + # However the error messages in our forms and APIs are generated + # dynamically, so we need to translate them by calling Gettext + # with our gettext backend as first argument. Translations are + # available in the errors.po file (as we use the "errors" domain). + if count = opts[:count] do + Gettext.dngettext(JadeWeb.Gettext, "errors", msg, msg, count, opts) + else + Gettext.dgettext(JadeWeb.Gettext, "errors", msg, opts) + end + end + + @doc """ + Translates the errors for a field from a keyword list of errors. 
+ """ + def translate_errors(errors, field) when is_list(errors) do + for {^field, {msg, opts}} <- errors, do: translate_error({msg, opts}) + end +end diff --git a/src/core/dbt-api/lib/jade_web/components/layouts.ex b/src/core/dbt-api/lib/jade_web/components/layouts.ex new file mode 100644 index 00000000..23c21d6c --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/components/layouts.ex @@ -0,0 +1,5 @@ +defmodule JadeWeb.Layouts do + use JadeWeb, :html + + embed_templates "layouts/*" +end diff --git a/src/core/dbt-api/lib/jade_web/components/layouts/app.html.heex b/src/core/dbt-api/lib/jade_web/components/layouts/app.html.heex new file mode 100644 index 00000000..9a9f78c6 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/components/layouts/app.html.heex @@ -0,0 +1,29 @@ +
+
+
+ + + +

+ v{Application.spec(:phoenix, :vsn)} +

+
+ +
+
+
+
+ <.flash_group flash={@flash} /> + {@inner_content} +
+
diff --git a/src/core/dbt-api/lib/jade_web/components/layouts/root.html.heex b/src/core/dbt-api/lib/jade_web/components/layouts/root.html.heex new file mode 100644 index 00000000..8d3f8ae1 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/components/layouts/root.html.heex @@ -0,0 +1,17 @@ + + + + + + + <.live_title suffix=" · Phoenix Framework"> + {assigns[:page_title] || "Jade"} + + + + + + {@inner_content} + + diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/file/file_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/file/file_controller.ex new file mode 100644 index 00000000..61f17177 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/file/file_controller.ex @@ -0,0 +1,190 @@ +defmodule JadeWeb.API.Datacoves.FileController do + use JadeWeb, :controller + + alias Jade.Files.FileRepo + + tags ["datacoves"] + + operation(:create, + summary: "Create one or multiple files for a given environment.", + parameters: [ + environment_slug: [ + in: :path, + description: "Environment Slug", + type: :string, + example: "env123" + ] + ], + request_body: + {"File Upload", "multipart/form-data", Schemas.OneOrMultipleFileUploads, required: true}, + responses: [ + ok: Generic.response(Schemas.FileOrFilesResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def create(conn, %{"file" => %Plug.Upload{} = upload} = params) do + params = + Map.merge(params, %{"filename" => upload.filename, "contents" => File.read!(upload.path)}) + + with {:ok, file} <- FileRepo.create_file(params) do + conn + |> put_resp_header( + "location", + ~p"/api/v2/datacoves/environments/#{file.environment_slug}/files/?#{[slug: file.slug]}" + ) + |> put_status(:created) + |> render(:show, file: file) + end + end + + def create(conn, %{"environment_slug" => environment_slug, "files" => files}) + when is_map(files) do + files = + Enum.map(files, fn {_idx, %{"file" => %Plug.Upload{} = upload} = file_params} -> + Map.merge(file_params, %{ + "environment_slug" => environment_slug, + "filename" => upload.filename, + "contents" => File.read!(upload.path) + }) + end) + + with {:ok, files} <- FileRepo.create_files(files) do + conn + |> put_status(:created) + |> render(:index, files: files) + end + end + + operation(:show, + summary: "Shows a single file.", + parameters: [ + environment_slug: [ + in: :path, + description: "Environment Slug", + type: :string, + required: true, + example: "env123" + ], + slug: [in: :query, description: "File slug", type: :string, required: false], + tag: [in: :query, description: "File tag", type: :string, required: false], + filename: [in: :query, description: "File filename", type: :string, required: false] + ], + responses: [ + ok: Generic.response(Schemas.FileResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def show(conn, %{"environment_slug" => environment_slug, "slug" => slug}) when slug != "" do + with {:ok, file} <- + FileRepo.get_and_download_file_by(environment_slug: environment_slug, slug: slug) do + render(conn, :show, file: file) + end + end + + def show(conn, %{"environment_slug" => environment_slug, "tag" => tag}) when tag != "" do + with {:ok, file} <- + FileRepo.get_and_download_file_by(environment_slug: environment_slug, tag: tag) do + render(conn, :show, file: file) + end + end + + def show(conn, %{"environment_slug" => environment_slug, "filename" => filename}) + when filename != "" do + attrs = %{environment_slug: environment_slug, filename: 
filename} + + with {:ok, file} <- FileRepo.get_and_download_file_by(attrs) do + render(conn, :show, file: file) + end + end + + operation(:update, + summary: "Updates a file.", + parameters: [ + environment_slug: [ + in: :path, + description: "Environment Slug", + type: :string, + example: "env123" + ], + slug: [ + in: :query, + description: "A File slug. Supersedes the tag.", + type: :string, + required: false + ], + tag: [in: :query, description: "A File tag", type: :string, required: false] + ], + request_body: {"File Upload", "multipart/form-data", Schemas.FileUpload, required: true}, + responses: [ + ok: Generic.response(Schemas.FileResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def update(conn, %{"environment_slug" => environment_slug, "slug" => slug} = params) + when slug != "" do + with {:ok, file} <- FileRepo.get_file_by(slug: slug, environment_slug: environment_slug) do + do_update(conn, file, params) + end + end + + def update(conn, %{"environment_slug" => environment_slug, "tag" => tag} = params) + when tag != "" do + with {:ok, file} <- FileRepo.get_file_by(tag: tag, environment_slug: environment_slug) do + do_update(conn, file, params) + end + end + + defp do_update(conn, file, %{"file" => %Plug.Upload{} = upload} = params) do + with {:ok, contents} <- File.read(upload.path), + params <- Map.merge(params, %{"contents" => contents, "filename" => upload.filename}), + {:ok, file} <- FileRepo.update_file(file, params) do + render(conn, :show, file: file) + end + end + + operation(:delete, + summary: "Deletes a file.", + parameters: [ + environment_slug: [ + in: :path, + description: "Environment Slug", + type: :string, + example: "env123" + ], + slug: [ + in: :query, + description: "A File slug. 
Supersedes the tag.", + type: :string, + required: false + ], + tag: [in: :query, description: "A File tag", type: :string, required: false] + ], + responses: [ + ok: Generic.response(Schemas.SuccessResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def delete(conn, %{"environment_slug" => environment_slug, "slug" => slug} = _params) + when slug != "" do + with {:ok, file} <- FileRepo.get_file_by(slug: slug, environment_slug: environment_slug), + {:ok, _file} <- FileRepo.delete_file(file) do + send_resp(conn, 200, "") + end + end + + def delete(conn, %{"environment_slug" => environment_slug, "tag" => tag} = _params) + when tag != "" do + with {:ok, file} <- FileRepo.get_file_by(tag: tag, environment_slug: environment_slug), + {:ok, _file} <- FileRepo.delete_file(file) do + send_resp(conn, 200, "") + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/file/file_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/file/file_controller_test.exs new file mode 100644 index 00000000..ef7c29a6 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/file/file_controller_test.exs @@ -0,0 +1,441 @@ +defmodule JadeWeb.FileControllerTest do + use JadeWeb.ConnCase, async: false + + import ExUnit.CaptureLog + + alias Jade.Files.FileRepo + + @fixture_path "test/support/fixtures/manifest.json" + @fail_fixture_path "test/support/fixtures/fail.txt" + + setup %{conn: conn} do + attrs = insert_two_accounts_with_repos() + user = insert(:user, is_service_account: true) + + auth_token = + insert_auth_token_for_user(user, attrs.account_1, attrs.environment_1, attrs.project_1) + + conn = + conn |> put_bearer_token(auth_token.key) |> put_req_header("accept", "application/json") + + Map.merge(attrs, %{conn: conn, user: user}) + end + + describe "create file" do + test "renders file when data is valid", %{conn: conn} = ctx do + params = %{ + tag: "tag-123", + file: %Plug.Upload{path: @fixture_path, filename: "filename.json"} + } + + conn = + conn + |> put_req_header("content-type", "multipart/form-data") + |> post(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + + assert %{"slug" => slug} = json_response(conn, 201)["data"] + + file = FileRepo.get_file!(slug) + + assert file.slug == slug + assert file.filename == "filename.json" + assert file.tag == "tag-123" + assert file.path == "/environments/airflow1/files/tag-123/filename.json" + # The contents are virtual and are stored only in the Jade.Storage bucket + assert file.contents == nil + end + + test "upload multiple files at once", %{conn: conn} = ctx do + params = %{ + files: [ + {"0", + %{ + tag: "some tag 1", + file: %Plug.Upload{path: @fixture_path, filename: "filename-1.json"} + }}, + {"1", + %{ + tag: "some tag 2", + file: %Plug.Upload{path: @fixture_path, filename: "filename-2.json"} + }} + ] + } + + conn = + conn + |> put_req_header("content-type", "multipart/form-data") + |> post(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + + %{"data" => files} = json_response(conn, 201) + + assert [ + %{ + "slug" => slug_1, + "tag" => "some tag 1", + "filename" => "filename-1.json", + "contents" => %{"child_map" => %{"seed.balboa.state_codes" => []}}, + "path" => "/environments/airflow1/files/some-tag-1/filename-1.json", + "environment_slug" => "airflow1" + }, + %{ + "slug" => slug_2, + "tag" => "some tag 2", + "filename" => "filename-2.json", + "contents" => %{"child_map" => 
%{"seed.balboa.state_codes" => []}}, + "path" => "/environments/airflow1/files/some-tag-2/filename-2.json", + "environment_slug" => "airflow1" + } + ] = files + + file_1 = FileRepo.get_file!(slug_1) + + assert file_1.slug == slug_1 + assert file_1.tag == "some tag 1" + assert file_1.filename == "filename-1.json" + assert file_1.path == "/environments/airflow1/files/some-tag-1/filename-1.json" + assert file_1.contents == nil + assert file_1.environment_slug == "airflow1" + + file_2 = FileRepo.get_file!(slug_2) + + assert file_2.slug == slug_2 + assert file_2.filename == "filename-2.json" + assert file_2.tag == "some tag 2" + assert file_2.path == "/environments/airflow1/files/some-tag-2/filename-2.json" + assert file_2.contents == nil + assert file_1.environment_slug == "airflow1" + end + + test "returns an error and deletes existing files if one file can't be created", + %{conn: conn} = ctx do + params = %{ + "files" => [ + {"0", + %{ + "tag" => "some tag 1", + "file" => %Plug.Upload{path: @fixture_path, filename: "some filename 1"} + }}, + {"1", + %{ + "tag" => "some tag 2", + "file" => %Plug.Upload{path: @fail_fixture_path, filename: ""} + }} + ] + } + + assert capture_log(fn -> + assert conn + |> post( + ~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", + params + ) + |> json_response(422) == %{"errors" => %{"filename" => ["can't be blank"]}} + end) =~ "Creating multiple files failed:" + + assert {:error, :not_found} = FileRepo.get_file_by(tag: "some tag 1") + assert {:error, :not_found} = FileRepo.get_file_by(tag: "some tag 2") + end + + test "allows files with duplicate tags if they are in separate environments", + %{conn: conn} = ctx do + insert(:file, tag: "foobar", environment_slug: ctx.environment_1.slug) + + params = %{ + tag: "foobar", + file: %Plug.Upload{path: @fixture_path, filename: "filename.json"} + } + + auth_token = + insert_auth_token_for_user(ctx.user, ctx.account_1, ctx.environment_2, ctx.project_1) + + %{"data" => %{"slug" => slug}} = + conn + |> put_bearer_token(auth_token.key) + |> post(~p"/api/v2/datacoves/environments/#{ctx.environment_2.slug}/files", params) + |> json_response(201) + + file = FileRepo.get_file!(slug) + + assert file.slug == slug + assert file.tag == "foobar" + assert file.filename == "filename.json" + assert file.path == "/environments/airflow2/files/foobar/filename.json" + assert file.contents == nil + assert file.environment_slug == "airflow2" + end + + test "does not return error if file is repeated and new version is stored", + %{conn: conn} = ctx do + insert(:file, tag: "foobar", environment_slug: ctx.environment_1.slug) + + params = %{ + tag: "foobar", + file: %Plug.Upload{path: @fixture_path, filename: "some filename"} + } + + assert conn + |> post(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + |> json_response(201) + + {:ok, files} = + FileRepo.get_files_by(%{environment_slug: ctx.environment_1.slug, tag: "foobar"}) + + assert length(files) == 2 + end + + test "renders errors when data is invalid", %{conn: conn} = ctx do + params = %{ + tag: "some tag", + file: %Plug.Upload{path: @fixture_path, filename: ""} + } + + conn = + post(conn, ~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + + assert json_response(conn, 422) == %{"errors" => %{"filename" => ["can't be blank"]}} + end + + test "returns an error if the file could not be uploaded", %{conn: conn} = ctx do + params = %{ + tag: "some tag", + file: %Plug.Upload{path: @fail_fixture_path, filename: "some 
filename"} + } + + assert capture_log(fn -> + assert conn + |> post( + ~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", + params + ) + |> json_response(422) == %{ + "errors" => %{ + "message" => "File upload failed. Please check the error in the logs." + } + } + end) =~ "HTTP Request failed - 400 - \"bad request\"" + end + end + + describe "show/2" do + setup [:create_file] + + test "returns a file with its contents for a slug", %{conn: conn, jade_file: file} = ctx do + params = %{slug: file.slug} + + res_file = + conn + |> get(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + |> json_response(200) + |> Map.get("data") + + assert res_file["slug"] == file.slug + assert res_file["tag"] == file.tag + assert res_file["filename"] == file.filename + assert res_file["environment_slug"] == file.environment_slug + assert res_file["path"] == file.path + assert %{"child_map" => %{"seed.balboa.state_codes" => []}} = res_file["contents"] + end + + test "returns a file by its tag", %{conn: conn, jade_file: file} = ctx do + params = %{tag: file.tag} + + res_file = + conn + |> get(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + |> json_response(200) + |> Map.get("data") + + assert res_file["slug"] == file.slug + assert %{"child_map" => %{"seed.balboa.state_codes" => []}} = res_file["contents"] + end + + test "returns latest file based on filename", %{conn: conn, jade_file: file} = ctx do + file = + insert(:file, + tag: file.tag, + filename: file.filename, + environment_slug: ctx.environment_1.slug, + inserted_at: DateTime.utc_now() |> DateTime.add(10, :second) + ) + + params = %{filename: file.filename} + + res_file = + conn + |> get(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + |> json_response(200) + |> Map.get("data") + + assert res_file["slug"] == file.slug + assert %{"child_map" => %{"seed.balboa.state_codes" => []}} = res_file["contents"] + + assert res_file["filename"] == file.filename + assert res_file["environment_slug"] == file.environment_slug + assert res_file["inserted_at"] == file.inserted_at |> DateTime.to_iso8601() + end + + test "returns an error if the file could not be downloaded", %{conn: conn} = ctx do + file = insert(:file, path: "/fail-upload", environment_slug: ctx.environment_1.slug) + + params = %{tag: file.tag} + + assert capture_log(fn -> + assert conn + |> get( + ~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", + params + ) + |> json_response(422) == %{ + "errors" => %{ + "message" => "The request to the storage bucket failed." 
+ } + } + end) =~ "HTTP Request failed - 400 - \"bad request\"" + end + + test "returns a 404 if the file does not belong to the requested environment", + %{conn: conn} = ctx do + file = insert(:file, environment_slug: "different-env") + + params = %{slug: file.slug} + + assert conn + |> get(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + |> json_response(404) + end + + test "returns a 404 if the file wasn't found by its slug", %{conn: conn} = ctx do + params = %{slug: Ecto.UUID.generate()} + + assert conn + |> get(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + |> json_response(404) + end + + test "returns a 404 if the file wasn't found by its tag", %{conn: conn} = ctx do + params = %{tag: "not-found"} + + assert conn + |> get(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + |> json_response(404) + end + end + + describe "update file" do + setup [:create_file] + + test "renders file when data is valid", %{conn: conn, jade_file: file} = ctx do + params = %{ + slug: file.slug, + tag: "updated tag", + file: %Plug.Upload{path: @fixture_path, filename: "updated-filename.json"} + } + + conn = put(conn, ~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + assert %{"slug" => slug} = json_response(conn, 200)["data"] + + file = FileRepo.get_file!(slug) + + assert file.slug == slug + assert file.filename == "updated-filename.json" + assert file.tag == "updated tag" + + assert file.path == + "/environments/#{ctx.environment_1.slug}/files/updated-tag/updated-filename.json" + + # The contents are virtual and are stored only in the Jade.Storage bucket + assert file.contents == nil + end + + test "updates a file by its tag", %{conn: conn, jade_file: file} = ctx do + params = %{ + tag: file.tag, + file: %Plug.Upload{path: @fixture_path, filename: "updated filename"} + } + + conn = + conn + |> put_req_header("content-type", "multipart/form-data") + |> put(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + + assert %{"slug" => slug} = json_response(conn, 200)["data"] + + file = FileRepo.get_file!(slug) + assert file.filename == "updated filename" + end + + test "renders errors when data is invalid", %{conn: conn, jade_file: file} = ctx do + params = %{ + slug: file.slug, + tag: "", + filename: "", + file: %Plug.Upload{path: @fixture_path} + } + + conn = put(conn, ~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + + assert %{ + "errors" => %{ + "filename" => ["can't be blank"] + } + } = + json_response(conn, 422) + end + + test "returns an error if the file could not be uploaded", + %{conn: conn, jade_file: file} = ctx do + params = %{ + slug: file.slug, + file: %Plug.Upload{path: @fail_fixture_path, filename: "updated filename"} + } + + assert capture_log(fn -> + assert conn + |> put( + ~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", + params + ) + |> json_response(422) == %{ + "errors" => %{ + "message" => "File upload failed. Please check the error in the logs." 
+ } + } + end) =~ "HTTP Request failed - 400 - \"bad request\"" + end + + test "returns 404 if the file does not exist", %{conn: conn} = ctx do + params = %{slug: Ecto.UUID.generate()} + + assert put(conn, ~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + |> json_response(404) + end + end + + describe "delete file" do + setup [:create_file] + + test "deletes chosen file by its slug", %{conn: conn, jade_file: file} = ctx do + params = %{slug: file.slug} + + assert conn + |> delete(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + |> response(200) + + assert reload(file) == nil + end + + test "deletes chosen file by its tag", %{conn: conn, jade_file: file} = ctx do + params = %{tag: file.tag} + + assert conn + |> delete(~p"/api/v2/datacoves/environments/#{ctx.environment_1.slug}/files", params) + |> response(200) + + assert reload(file) == nil + end + end + + def create_file(%{environment_1: environment_1}) do + %{jade_file: insert(:file, environment_slug: environment_1.slug)} + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/file/file_json.ex b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/file/file_json.ex new file mode 100644 index 00000000..3b253ee0 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/file/file_json.ex @@ -0,0 +1,29 @@ +defmodule JadeWeb.API.Datacoves.FileJSON do + alias Jade.Files.File + + def index(%{files: files}) do + %{data: for(file <- files, do: data(file))} + end + + def show(%{file: file}) do + %{data: data(file)} + end + + defp data(%File{} = file) do + %{ + slug: file.slug, + filename: file.filename, + tag: file.tag, + contents: encode_contents(file.contents), + environment_slug: file.environment_slug, + path: file.path, + inserted_at: file.inserted_at + } + end + + defp encode_contents(contents) when is_binary(contents) do + Jason.Fragment.new(contents) + end + + defp encode_contents(contents), do: contents +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/healthcheck/healthcheck_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/healthcheck/healthcheck_controller.ex new file mode 100644 index 00000000..16437e4e --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/healthcheck/healthcheck_controller.ex @@ -0,0 +1,14 @@ +defmodule JadeWeb.API.Datacoves.HealthcheckController do + use JadeWeb, :controller + + tags ["datacoves"] + + operation(:show, + summary: "Returns 'ok' if the application if online.", + responses: [ + ok: Generic.ok() + ] + ) + + def show(conn, _params), do: send_resp(conn, 200, "ok") +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/healthcheck/healthcheck_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/healthcheck/healthcheck_controller_test.exs new file mode 100644 index 00000000..fc09e49d --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/healthcheck/healthcheck_controller_test.exs @@ -0,0 +1,15 @@ +defmodule JadeWeb.API.Datacoves.HealthcheckControllerTest do + use JadeWeb.ConnCase, async: true + + test "returns 200 for the /api/v2 scope", %{conn: conn} do + conn = get(conn, ~p"/api/v2/healthcheck") + assert conn.status == 200 + assert conn.resp_body == "ok" + end + + test "returns 200 for the /api/internal scope", %{conn: conn} do + conn = get(conn, ~p"/api/internal/healthcheck") + assert conn.status == 200 + assert conn.resp_body == "ok" + end +end diff --git 
a/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/manifest/manifest_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/manifest/manifest_controller.ex new file mode 100644 index 00000000..506696c4 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/manifest/manifest_controller.ex @@ -0,0 +1,122 @@ +defmodule JadeWeb.API.Datacoves.ManifestController do + use JadeWeb, :controller + + alias Jade.Environments.EnvironmentRepo + alias Jade.Manifests.ManifestRepo + + require Logger + + tags ["datacoves"] + + operation(:create, + summary: "Create a manifest for a given environment.", + parameters: [ + environment_slug: [ + in: :path, + description: "Environment Slug", + type: :string, + example: "env123" + ], + dag_id: [in: :query, description: "Dag ID", type: :string, example: "sample_dag"], + run_id: [ + in: :query, + description: "Dag Run Run ID", + type: :string, + example: "manual__2023-12-02T09:49:46.105347+00:00" + ] + ], + request_body: {"File Upload", "multipart/form-data", Schemas.FileUpload, required: true}, + responses: [ + ok: Generic.response(Schemas.FileOrFilesResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def create( + conn, + %{ + "file" => upload, + "dag_id" => dag_id, + "run_id" => run_id, + "environment_slug" => environment_slug + } = params + ) do + tag = Map.get(params, "tag") + + with {:ok, content} <- File.read(upload.path), + {:ok, _manifest} <- ManifestRepo.create(environment_slug, dag_id, run_id, content, tag) do + conn + |> put_status(:created) + |> json(:ok) + end + end + + def create(conn, %{"file" => upload, "environment_slug" => environment_slug} = params) do + tag = Map.get(params, "tag") + + with {:ok, content} <- File.read(upload.path), + {:ok, _manifest} <- ManifestRepo.create(environment_slug, content, tag) do + conn + |> put_status(:created) + |> json(:ok) + end + end + + operation(:show, + summary: "Shows a single manifest.", + parameters: [ + environment_slug: [ + in: :path, + description: "Environment Slug", + type: :string, + example: "env123" + ], + dag_id: [in: :query, description: "Dag ID", type: :string, example: "sample_dag"], + tag: [in: :query, description: "Manifest tag", type: :string], + trimmed: [in: :query, description: "Get trimmed manifest", type: :boolean] + ], + responses: [ + ok: Generic.response(Schemas.FileContent), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def show(conn, %{"dag_id" => dag_id} = params) do + with {:ok, manifest} <- ManifestRepo.get_by(dag_id: dag_id), + content <- get_manifest(manifest, params) do + conn + |> put_status(:ok) + |> json(content) + end + end + + def show(conn, %{"environment_slug" => environment_slug, "tag" => tag} = params) do + with {:ok, environment} <- EnvironmentRepo.get_by_slug(environment_slug), + {:ok, manifest} <- ManifestRepo.get_by(environment_slug: environment.slug, tag: tag), + content <- get_manifest(manifest, params) do + conn + |> put_status(:ok) + |> json(content) + end + end + + def show(conn, %{"environment_slug" => environment_slug} = params) do + with {:ok, environment} <- EnvironmentRepo.get_by_slug(environment_slug), + {:ok, manifest} <- ManifestRepo.get_latest_for_environment(environment), + content <- get_manifest(manifest, params) do + conn + |> put_status(:ok) + |> json(content) + end + end + + defp get_manifest(manifest, %{"trimmed" => trimmed}) when trimmed in [false, "false"] do + ManifestRepo.get_full_content(manifest) + end + + defp 
get_manifest(manifest, _params) do + ManifestRepo.get_minimal_content(manifest) + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/manifest/manifest_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/manifest/manifest_controller_test.exs new file mode 100644 index 00000000..2b6e543d --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/manifest/manifest_controller_test.exs @@ -0,0 +1,303 @@ +defmodule JadeWeb.API.Datacoves.ManifestControllerTest do + use JadeWeb.ConnCase, async: false + + alias Jade.JobRunIds.JobRunId + alias Jade.Manifests.ManifestRepo + + @fixture_path "test/support/fixtures/manifest.json" + + defp path() do + ~p"/api/v2/datacoves/manifests" + end + + setup %{conn: conn} do + attrs = insert_two_accounts_with_repos() + + user = insert(:user, is_service_account: true) + + auth_token = + insert_auth_token_for_user(user, attrs.account_1, attrs.environment_1, attrs.project_1) + + conn = put_bearer_token(conn, auth_token.key) + + dag_run = + insert(:dag_run, repo: attrs.repo_1, run_id: "manual__2023-12-02T09:49:46.105347+00:00") + + Map.merge(attrs, %{conn: conn, user: user, dag_run: dag_run}) + end + + describe "create/2" do + test "creates a manifest", ctx do + params = %{ + dag_id: ctx.dag_run.dag_id, + run_id: "manual__2023-12-02T09:49:46.105347+00:00", + environment_slug: ctx.environment_1.slug, + tag: "tag-123", + file: %Plug.Upload{path: @fixture_path} + } + + ctx.conn + |> post(path(), params) + |> json_response(201) + + [job_run_id] = Jade.Repo.all(JobRunId) + {:ok, manifest} = ManifestRepo.get_by(account_id: ctx.account_1.id) + + assert manifest.account_id == ctx.account_1.id + assert manifest.project_id == ctx.environment_1.project_id + assert manifest.job_run_id == job_run_id.id + assert manifest.dag_run_run_id == params.run_id + assert manifest.tag == "tag-123" + + assert %{"metadata" => %{"dbt_version" => "1.6.9"}} = + ManifestRepo.get_full_content(manifest) + end + + test "creates a manifest with without job run", ctx do + params = %{ + environment_slug: ctx.environment_1.slug, + tag: "tag-234", + file: %Plug.Upload{path: @fixture_path} + } + + ctx.conn + |> post(path(), params) + |> json_response(201) + + {:ok, manifest} = ManifestRepo.get_by(account_id: ctx.account_1.id) + + assert manifest.account_id == ctx.account_1.id + assert manifest.project_id == ctx.environment_1.project_id + assert manifest.job_run_id == nil + assert manifest.dag_run_run_id == nil + assert manifest.tag == "tag-234" + + assert %{"metadata" => %{"dbt_version" => "1.6.9"}} = + ManifestRepo.get_full_content(manifest) + + assert %{ + "metadata" => %{ + "project_id" => "84e0991a380d2a451e9a7787e56e2b53" + } + } = + ctx.conn + |> get(path(), %{environment_slug: ctx.environment_1.slug, trimmed: true}) + |> json_response(200) + end + + test "returns an error if a manifest with the same environment and tag already exists", ctx do + insert(:manifest, environment_slug: ctx.environment_1.slug, tag: "tag-234") + + params = %{ + environment_slug: ctx.environment_1.slug, + tag: "tag-234", + file: %Plug.Upload{path: @fixture_path} + } + + assert %{"errors" => %{"tag" => ["has already been taken"]}} = + ctx.conn + |> post(path(), params) + |> json_response(422) + end + + test "returns an error if a user tries to upload a manifest to another environment", ctx do + params = %{ + dag_id: ctx.dag_run.dag_id, + run_id: "manual__2023-12-02T09:49:46.105347+00:00", + environment_slug: ctx.environment_2.slug, + file: %Plug.Upload{path: 
@fixture_path} + } + + assert %{ + "errors" => %{ + "message" => "Invalid Environment in Path. You have no accces to this environment." + } + } = + ctx.conn + |> post(path(), params) + |> json_response(401) + end + + test "returns an error if the airflow repo of the environment is not available", ctx do + environment = + insert(:environment, + project: ctx.project_1, + services: %{"airflow" => %{"enabled" => true}}, + slug: "env-wo-airflow" + ) + + auth_token = + insert_auth_token_for_user(ctx.user, ctx.account_1, environment, ctx.project_1) + + params = %{ + dag_id: ctx.dag_run.dag_id, + run_id: "manual__2023-12-02T09:49:46.105347+00:00", + environment_slug: environment.slug, + file: %Plug.Upload{path: @fixture_path} + } + + assert capture_log(fn -> + assert %{ + "errors" => %{ + "message" => "Airflow Repo for Environment env-wo-airflow not found." + } + } = + ctx.conn + |> put_bearer_token(auth_token.key) + |> post(path(), params) + |> json_response(404) + end) =~ "Cannot connect to Airflow repo at:" + end + end + + describe "show/2" do + test "returns a minimal manifest", ctx do + content = @fixture_path |> File.read!() |> Jason.decode!() + manifest = insert(:manifest, content: content) + + params = %{ + dag_id: manifest.dag_id, + environment_slug: ctx.environment_1.slug + } + + body = + ctx.conn + |> get(path(), params) + |> json_response(200) + + # %{ + # id: manifest_id, + # account_id: manifest_account_id, + # environment_slug: manifest_environment_slug, + # dag_id: manifest_dag_id, + # dag_run_run_id: manifest_dag_run_run_id + # } = manifest + + assert %{ + # "id" => ^manifest_id, + # "account_id" => ^manifest_account_id, + # "environment_slug" => ^manifest_environment_slug, + # "dag_id" => ^manifest_dag_id, + # "dag_run_id" => ^manifest_dag_run_run_id, + # "exposures" => %{ + # "exposure.balboa.customer_loans" => %{ + # "fqn" => ["balboa", "L4_exposures", "customer_loans"] + # # removed other fields for brevity + # }, + # "exposure.balboa.loans_analysis" => %{ + # "fqn" => ["balboa", "L4_exposures", "loans_analysis"] + # } + # }, + # "group_map" => %{}, + # "groups" => %{}, + "metadata" => %{ + "generated_at" => "2024-01-18T14:38:12.611300Z", + "project_name" => "balboa" + # removed other fields for brevity + }, + "nodes" => %{ + "model.balboa.base_cases" => %{ + "database" => "BALBOA_DEV", + "fqn" => ["balboa", "L2_bays", "covid_observations", "base_cases"], + "schema" => "gomezn" + }, + "model.balboa.country_populations" => %{ + "database" => "BALBOA_DEV", + "fqn" => ["balboa", "L1_inlets", "country_data", "country_populations"], + "schema" => "gomezn" + } + } + } = body + end + + test "returns an empty map if the manifest has no content", ctx do + manifest = insert(:manifest, content: nil) + + params = %{ + dag_id: manifest.dag_id, + environment_slug: ctx.environment_1.slug + } + + body = + ctx.conn + |> get(path(), params) + |> json_response(200) + + assert %{ + # "exposures" => nil, + # "group_map" => nil, + # "groups" => nil, + "metadata" => nil, + "nodes" => nil + } = body + end + + test "returns a manifest by environment and tag", ctx do + insert(:manifest, + environment_slug: ctx.environment_1.slug, + tag: "tag-123", + content: %{"name" => "test-123"} + ) + + params = %{ + tag: "tag-123", + environment_slug: ctx.environment_1.slug, + trimmed: false + } + + assert %{"name" => "test-123"} = + ctx.conn + |> get(path(), params) + |> json_response(200) + end + + test "returns the latest manifest for an environment", ctx do + _later_manifest = + insert(:manifest, + 
environment_slug: ctx.environment_1.slug, + content: %{"manifest" => "later"}, + inserted_at: ~U[2023-01-01 12:00:00Z] + ) + + _earlier_manifest = + insert(:manifest, + environment_slug: ctx.environment_1.slug, + content: %{"manifest" => "earlier"}, + inserted_at: ~U[2023-01-01 01:00:00Z] + ) + + params = %{ + environment_slug: ctx.environment_1.slug, + trimmed: false + } + + assert %{"manifest" => "later"} = + ctx.conn + |> get(path(), params) + |> json_response(200) + end + + test "returns a 404 if no manifest exists for a given environment_slug", ctx do + params = %{ + environment_slug: ctx.environment_1.slug + } + + %{"errors" => %{"message" => "Not Found"}} = + ctx.conn + |> get(path(), params) + |> json_response(404) + end + + test "returns a 404 if no manifest exists for a given dag_id", ctx do + params = %{ + dag_id: "yaml_example_dag", + environment_slug: ctx.environment_1.slug + } + + %{"errors" => %{"message" => "Not Found"}} = + ctx.conn + |> get(path(), params) + |> json_response(404) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/project/project_manifest_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/project/project_manifest_controller.ex new file mode 100644 index 00000000..ff62d0fe --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/project/project_manifest_controller.ex @@ -0,0 +1,45 @@ +defmodule JadeWeb.API.Datacoves.ProjectManifestController do + use JadeWeb, :controller + + alias Jade.Manifests.ManifestRepo + alias Jade.Projects.ProjectRepo + + require Logger + + tags ["datacoves"] + + operation(:show, + summary: "Shows the latest Manifest for a Project.", + parameters: [ + project_slug: [ + in: :path, + description: "Project Slug", + type: :string + ], + trimmed: [in: :query, description: "Get trimmed manifest", type: :boolean] + ], + responses: [ + ok: Generic.response(Schemas.FileContent), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def show(conn, %{"project_slug" => project_slug} = params) do + with {:ok, project} <- ProjectRepo.get_by(slug: project_slug), + {:ok, manifest} <- ManifestRepo.get_latest_for_project(project), + content <- get_manifest(manifest, params) do + conn + |> put_status(:ok) + |> json(content) + end + end + + defp get_manifest(manifest, %{"trimmed" => trimmed}) when trimmed in [false, "false"] do + ManifestRepo.get_full_content(manifest) + end + + defp get_manifest(manifest, _params) do + ManifestRepo.get_minimal_content(manifest) + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/project/project_manifest_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/project/project_manifest_controller_test.exs new file mode 100644 index 00000000..5a4ca48c --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/datacoves/project/project_manifest_controller_test.exs @@ -0,0 +1,156 @@ +defmodule JadeWeb.API.Datacoves.ProjectManifestControllerTest do + use JadeWeb.ConnCase, async: false + + @fixture_path "test/support/fixtures/manifest.json" + + defp path(project_slug) do + ~p"/api/v2/datacoves/projects/#{project_slug}/latest-manifest" + end + + setup %{conn: conn} do + attrs = insert_two_accounts_with_repos() + insert(:dag_run, repo: attrs.repo_1, run_id: "manual__2023-12-02T09:49:46.105347+00:00") + + user = insert(:user) + + auth_token = + insert_auth_token_for_user(user, attrs.account_1, attrs.environment_1, attrs.project_1) + + conn = put_bearer_token(conn, 
auth_token.key) + + Map.merge(attrs, %{conn: conn}) + end + + describe "show/2" do + test "returns a minimal manifest", ctx do + content = @fixture_path |> File.read!() |> Jason.decode!() + + # later_manifest = + insert(:manifest, + project_id: ctx.environment_1.project_id, + content: content, + inserted_at: ~U[2023-01-01 12:00:00Z] + ) + + _earlier_manifest = + insert(:manifest, + project_id: ctx.environment_1.project_id, + content: nil, + inserted_at: ~U[2023-01-01 01:00:00Z] + ) + + body = + ctx.conn + |> get(path(ctx.environment_1.project.slug)) + |> json_response(200) + + # %{ + # id: manifest_id, + # account_id: manifest_account_id, + # environment_slug: manifest_environment_slug, + # dag_id: manifest_dag_id, + # dag_run_run_id: manifest_dag_run_run_id + # } = later_manifest + + assert %{ + # "id" => ^manifest_id, + # "account_id" => ^manifest_account_id, + # "environment_slug" => ^manifest_environment_slug, + # "dag_id" => ^manifest_dag_id, + # "dag_run_id" => ^manifest_dag_run_run_id, + # "exposures" => %{ + # "exposure.balboa.customer_loans" => %{ + # "fqn" => ["balboa", "L4_exposures", "customer_loans"] + # # removed other fields for brevity + # }, + # "exposure.balboa.loans_analysis" => %{ + # "fqn" => ["balboa", "L4_exposures", "loans_analysis"] + # } + # }, + # "group_map" => %{}, + # "groups" => %{}, + "metadata" => %{ + "generated_at" => "2024-01-18T14:38:12.611300Z", + "project_name" => "balboa" + # removed other fields for brevity + }, + "nodes" => %{ + "model.balboa.base_cases" => %{ + "database" => "BALBOA_DEV", + "fqn" => ["balboa", "L2_bays", "covid_observations", "base_cases"], + "schema" => "gomezn" + }, + "model.balboa.country_populations" => %{ + "database" => "BALBOA_DEV", + "fqn" => ["balboa", "L1_inlets", "country_data", "country_populations"], + "schema" => "gomezn" + } + } + } = body + end + + test "returns an empty map if the manifest has no content", ctx do + insert(:manifest, + project_id: ctx.environment_1.project_id, + content: nil + ) + + body = + ctx.conn + |> get(path(ctx.environment_1.project.slug)) + |> json_response(200) + + assert %{ + # "exposures" => nil, + # "group_map" => nil, + # "groups" => nil, + "metadata" => nil, + "nodes" => nil + } = body + end + + test "returns the full manifest if requested", ctx do + content = @fixture_path |> File.read!() |> Jason.decode!() + + insert(:manifest, + project_id: ctx.environment_1.project_id, + content: content + ) + + params = %{ + "trimmed" => false + } + + body = + ctx.conn + |> get(path(ctx.environment_1.project.slug), params) + |> json_response(200) + + # These fields are excluded from the trimmed version + assert %{ + "selectors" => _selectors, + "metrics" => _metrics, + "sources" => _sources, + "macros" => _macros, + "docs" => _docs, + "disabled" => _disabled + } = body + end + + test "returns a 404 if no manifest exists for a given project_slug", ctx do + %{"errors" => %{"message" => "Not Found"}} = + ctx.conn + |> get(path(ctx.environment_1.project.slug)) + |> json_response(404) + end + + test "returns a 404 if no project exists for a given project_slug", ctx do + %{ + "errors" => %{"message" => "Invalid Project in Path. 
You have no accces to this project."} + } = + ctx.conn + |> get(path("fake-project-123")) + |> json_response(401) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/fallback_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/fallback_controller.ex new file mode 100644 index 00000000..12df7627 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/fallback_controller.ex @@ -0,0 +1,63 @@ +defmodule JadeWeb.API.FallbackController do + use Phoenix.Controller, formats: [:json] + + import Plug.Conn + + require Logger + + def call(conn, {:error, %Ecto.Changeset{} = changeset}) do + conn + |> put_status(:unprocessable_entity) + |> put_view(json: JadeWeb.ChangesetJSON) + |> render(:error, changeset: changeset) + end + + def call(conn, {:error, :unprocessable_entity, message}) do + conn + |> put_status(:unprocessable_entity) + |> put_view(json: JadeWeb.ErrorJSON) + |> render(:"422", message: message) + end + + def call(conn, {:error, :not_found}) do + conn + |> put_status(:not_found) + |> put_view(json: JadeWeb.ErrorJSON) + |> render(:"404") + end + + def call(conn, {:error, :not_found, message}) do + conn + |> put_status(:not_found) + |> put_view(json: JadeWeb.ErrorJSON) + |> render(:"404", message: message) + end + + def call(conn, {:error, :invalid_params}) do + conn + |> put_status(:bad_request) + |> put_view(json: JadeWeb.ErrorJSON) + |> render(:"400") + end + + def call(conn, {:error, _error, message}) do + conn + |> put_status(:internal_server_error) + |> JadeWeb.ErrorJSON.send_json(message) + |> halt() + end + + def call(conn, {:error, _error}) do + conn + |> put_status(:internal_server_error) + |> put_view(json: JadeWeb.ErrorJSON) + |> render(:"500") + end + + def unauthenticated(conn, message) do + conn + |> put_status(:unauthorized) + |> JadeWeb.ErrorJSON.send_json(message) + |> halt() + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/account/account_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/account/account_controller.ex new file mode 100644 index 00000000..f5c99285 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/account/account_controller.ex @@ -0,0 +1,27 @@ +defmodule JadeWeb.API.V2.AccountController do + use JadeWeb, :controller + + alias Jade.Accounts.AccountRepo + + plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true + + tags ["dbt-api"] + + operation(:show, + summary: "Shows a single account.", + parameters: [ + account_id: [in: :path, description: "Account ID", type: :integer, example: 1] + ], + responses: [ + ok: Generic.response(Schemas.ShowAccountResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def show(conn, %{account_id: account_id}) do + with {:ok, account} <- AccountRepo.get(account_id) do + render(conn, :show, account: account) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/account/account_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/v2/account/account_controller_test.exs new file mode 100644 index 00000000..03dcb29f --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/account/account_controller_test.exs @@ -0,0 +1,87 @@ +defmodule JadeWeb.API.V2.AccountControllerTest do + use JadeWeb.ConnCase, async: true + + defp build_path(account_id) do + ~p"/api/v2/accounts/#{account_id}" + end + + setup %{conn: conn} do + user = insert(:user) + account = insert(:account) + auth_token = insert_auth_token_for_user(user, account, nil, nil) + conn 
= put_bearer_token(conn, auth_token.key) + %{conn: conn, account: account, token: auth_token} + end + + describe "show/2" do + test "returns a single account", ctx do + %{data: account} = + ctx.conn + |> get(build_path(ctx.account.id)) + |> json_response(200) + |> assert_schema("ShowAccountResponse", api_spec()) + + assert_account_fields(account) + end + + test "returns an error if an account does not exist", ctx do + %{errors: %{message: "Invalid Account in Path. You have no accces to this account."}} = + ctx.conn + |> get(build_path(404)) + |> json_response(401) + |> assert_schema("ErrorResponse", api_spec()) + end + + test "returns an error if an account tries to access another account", ctx do + another_account = insert(:account) + + %{errors: %{message: "Invalid Account in Path. You have no accces to this account."}} = + ctx.conn + |> get(build_path(another_account.id)) + |> json_response(401) + |> assert_schema("ErrorResponse", api_spec()) + end + end + + defp assert_account_fields(account) do + assert %{ + id: _id, + name: _name, + state: _state, + plan: _plan, + pending_cancel: _pending_cancel, + run_slots: _run_slots, + developer_seats: _developer_seats, + it_seats: _it_seats, + read_only_seats: _read_only_seats, + pod_memory_request_mebibytes: _pod_memory_request_mebibytes, + run_duration_limit_seconds: _run_duration_limit_seconds, + queue_limit: _queue_limit, + stripe_customer_id: _stripe_customer_id, + metronome_customer_id: _metronome_customer_id, + salesforce_customer_id: _salesforce_customer_id, + third_party_billing: _third_party_billing, + billing_email_address: _billing_email_address, + locked: _locked, + lock_reason: _lock_reason, + lock_cause: _lock_cause, + develop_file_system: _develop_file_system, + unlocked_at: _unlocked_at, + unlock_if_subscription_renewed: _unlock_if_subscription_renewed, + enterprise_authentication_method: _enterprise_authentication_method, + enterprise_login_slug: _enterprise_login_slug, + enterprise_unique_identifier: _enterprise_unique_identifier, + business_critical: _business_critical, + created_at: _created_at, + updated_at: _updated_at, + starter_repo_url: _starter_repo_url, + git_auth_level: _git_auth_level, + identifier: _identifier, + trial_end_date: _trial_end_date, + static_subdomain: _static_subdomain, + run_locked_until: _run_locked_until, + docs_job_id: _docs_job_id, + freshness_job_id: _freshness_job_id + } = account + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/account/account_json.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/account/account_json.ex new file mode 100644 index 00000000..e28920eb --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/account/account_json.ex @@ -0,0 +1,23 @@ +defmodule JadeWeb.API.V2.AccountJSON do + @moduledoc """ + The Account JSON component. + + Renders one or multiple Accounts to a map. 
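+
+ A rough sketch of the rendered shape, not a contract: every field of the
+ account struct is passed through via `Map.from_struct/1`. The struct module
+ name and field values below are assumptions for illustration only.
+
+     # hypothetical account struct; most fields omitted for brevity
+     show(%{account: %Jade.Accounts.Account{id: 1, name: "Acme"}})
+     #=> %{data: %{id: 1, name: "Acme", ...}}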
+ """ + + def index(%{accounts: accounts}) do + %{data: data(accounts)} + end + + def show(%{account: account}) do + %{data: data(account)} + end + + defp data(accounts) when is_list(accounts) do + for account <- accounts, do: data(account) + end + + defp data(account) do + Map.from_struct(account) + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/environment/environment_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/environment/environment_controller.ex new file mode 100644 index 00000000..90e8da61 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/environment/environment_controller.ex @@ -0,0 +1,46 @@ +defmodule JadeWeb.API.V2.EnvironmentController do + use JadeWeb, :controller + + alias Jade.Environments.EnvironmentRepo + + plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true + + tags ["dbt-api"] + + operation(:index, + summary: "List all environments of an account.", + parameters: [ + account_id: [in: :path, description: "Environment ID", type: :integer, example: 1], + limit: Pagination.limit(), + offset: Pagination.offset() + ], + responses: [ + ok: Generic.response(Schemas.ListEnvironmentsResponse), + unauthorized: Generic.unauthorized() + ] + ) + + def index(conn, params) do + environments = EnvironmentRepo.list(params) + render(conn, :index, environments: environments) + end + + operation(:show, + summary: "Shows a single environment.", + parameters: [ + account_id: [in: :path, description: "Account ID", type: :integer, example: 1], + id: [in: :path, description: "Environment ID", type: :integer, example: 1] + ], + responses: [ + ok: Generic.response(Schemas.ShowEnvironmentResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def show(conn, %{id: environment_id, account_id: account_id}) do + with {:ok, environment} <- EnvironmentRepo.get(account_id, environment_id) do + render(conn, :show, environment: environment) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/environment/environment_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/v2/environment/environment_controller_test.exs new file mode 100644 index 00000000..a0bacedb --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/environment/environment_controller_test.exs @@ -0,0 +1,91 @@ +defmodule JadeWeb.API.V2.EnvironmentControllerTest do + use JadeWeb.ConnCase, async: true + + defp build_path(account_id, environment_id \\ nil) do + ~p"/api/v2/accounts/#{account_id}/environments" <> "/#{environment_id}" + end + + setup %{conn: conn} do + user = insert(:user) + account = insert(:account) + project = insert(:project, account: account) + + token = insert_auth_token_for_user(user, account, nil, project) + conn = put_bearer_token(conn, token.key) + %{conn: conn, account: account, project: project, token: token} + end + + describe "index/2" do + test "returns a list of environments", ctx do + insert_pair(:environment, project: ctx.project) + + %{data: data} = + ctx.conn + |> get(build_path(ctx.account.id)) + |> json_response(200) + |> assert_schema("ListEnvironmentsResponse", api_spec()) + + assert length(data) == 2 + end + + test "returns an error if the path and token account_id don't match", ctx do + insert_pair(:environment, project: ctx.project) + another_account = insert(:account) + another_token = insert_auth_token_for_user(insert(:user), another_account, nil, nil) + + assert %{errors: %{message: message}} = + ctx.conn + |> 
put_bearer_token(another_token.key) + |> get(build_path(ctx.account.id)) + |> json_response(401) + |> assert_schema("ErrorResponse", api_spec()) + + assert message == "Invalid Account in Path. You have no accces to this account." + end + + test "paginates the response", ctx do + insert_list(3, :environment, project: ctx.project) + params = %{limit: 2, offset: 2} + + %{data: data} = + ctx.conn + |> get(build_path(ctx.account.id), params) + |> json_response(200) + |> assert_schema("ListEnvironmentsResponse", api_spec()) + + assert length(data) == 1 + end + end + + describe "show/2" do + test "returns a single environment", ctx do + environment = insert(:environment, project: ctx.project) + + %{data: res_environment} = + ctx.conn + |> get(build_path(ctx.account.id, environment.id)) + |> json_response(200) + |> assert_schema("ShowEnvironmentResponse", api_spec()) + + assert res_environment.id == environment.id + end + + test "returns an error if the environment belongs to another account", ctx do + environment = insert(:environment) + + %{errors: %{message: "Not Found"}} = + ctx.conn + |> get(build_path(ctx.account.id, environment.id)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + + test "returns an error if a environment does not exist", ctx do + %{errors: %{message: "Not Found"}} = + ctx.conn + |> get(build_path(ctx.account.id, 404)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/environment/environment_json.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/environment/environment_json.ex new file mode 100644 index 00000000..f2b6a8b7 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/environment/environment_json.ex @@ -0,0 +1,23 @@ +defmodule JadeWeb.API.V2.EnvironmentJSON do + @moduledoc """ + The Environment JSON component. + + Renders one or multiple Environmentss to a map. 
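+
+ A minimal sketch of the rendered shape (struct module name and field values
+ are assumptions): the environment struct is flattened with `Map.from_struct/1`
+ and the `:services` and `:airflow_config` keys are dropped before rendering.
+
+     # hypothetical environment struct; most fields omitted for brevity
+     show(%{environment: %Jade.Environments.Environment{id: 1, slug: "airflow1"}})
+     #=> %{data: %{id: 1, slug: "airflow1", ...}}  # no :services or :airflow_config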
+ """ + + def index(%{environments: environments}) do + %{data: data(environments)} + end + + def show(%{environment: environment}) do + %{data: data(environment)} + end + + defp data(environments) when is_list(environments) do + for environment <- environments, do: data(environment) + end + + defp data(environment) do + environment |> Map.from_struct() |> Map.drop([:services, :airflow_config]) + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/job/job_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job/job_controller.ex new file mode 100644 index 00000000..c02e6f75 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job/job_controller.ex @@ -0,0 +1,44 @@ +defmodule JadeWeb.API.V2.JobController do + use JadeWeb, :controller + + alias Jade.Jobs.JobRepo + + plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true + + tags ["dbt-api"] + + operation(:index, + summary: "List all jobs of an account.", + parameters: [ + account_id: [in: :path, description: "Account ID", type: :integer, example: 1] + ], + responses: [ + ok: Generic.response(Schemas.ListJobsResponse), + unauthorized: Generic.unauthorized() + ] + ) + + def index(conn, params) do + jobs = JobRepo.list(params) + render(conn, :index, jobs: jobs) + end + + operation(:show, + summary: "Show a single job of an account.", + parameters: [ + account_id: [in: :path, description: "Account ID", type: :integer, example: 1], + id: [in: :path, description: "Job ID", type: :string, example: "sample_project_1"] + ], + responses: [ + ok: Generic.response(Schemas.ShowJobResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def show(conn, %{id: job_id, account_id: account_id}) do + with {:ok, job} <- JobRepo.get(account_id, job_id) do + render(conn, :show, job: job) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/job/job_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job/job_controller_test.exs new file mode 100644 index 00000000..c0bb9080 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job/job_controller_test.exs @@ -0,0 +1,146 @@ +defmodule JadeWeb.API.V2.JobControllerTest do + use JadeWeb.ConnCase, async: false + + alias Jade.JobIds.JobIdRepo + + defp build_path(account_id, job_id \\ nil) do + ~p"/api/v2/accounts/#{account_id}/jobs" <> "/#{job_id}" + end + + setup %{conn: conn} do + attrs = insert_two_accounts_with_repos() + user = insert(:user) + + token = insert_auth_token_for_user(user, attrs.account_1, nil, nil) + conn = put_bearer_token(conn, token.key) + + Map.merge(attrs, %{conn: conn, user: user, token: token}) + end + + describe "index/2" do + test "returns a list of jobs and creates integer IDs for them", ctx do + dag_1 = insert(:dag, repo: ctx.repo_1) + dag_2 = insert(:dag, repo: ctx.repo_1) + _ignored_dag = insert(:dag, repo: ctx.repo_2) + + %{data: data} = + ctx.conn + |> get(build_path(ctx.account_1.id)) + |> json_response(200) + |> assert_schema("ListJobsResponse", api_spec()) + + assert length(data) == 2 + assert_lists_equal(data, [dag_1, dag_2], &(&1.name == &2.dag_id)) + + job_ids = JobIdRepo.list() + assert_lists_equal(data, job_ids, &(&1.id == &2.id)) + assert_lists_equal(data, job_ids, &(&1.name == &2.dag_id)) + end + + test "returns an empty list if the account has no jobs", ctx do + _ignored_dag = insert(:dag, repo: ctx.repo_2) + + %{data: []} = + ctx.conn + |> get(build_path(ctx.account_1.id)) + |> json_response(200) + |> 
assert_schema("ListJobsResponse", api_spec()) + end + + test "returns an error if the path and token account_id don't match", ctx do + another_token = insert_auth_token_for_user(insert(:user), ctx.account_2, nil, nil) + + assert %{errors: %{message: "Invalid Account in Path. You have no accces to this account."}} = + ctx.conn + |> put_bearer_token(another_token.key) + |> get(build_path(ctx.account_1.id)) + |> json_response(401) + |> assert_schema("ErrorResponse", api_spec()) + end + end + + describe "show/2" do + test "returns a single job", ctx do + dag = insert(:dag, repo: ctx.repo_1) + _ignored_dag = insert(:dag, repo: ctx.repo_1) + _ignored_dag = insert(:dag, repo: ctx.repo_2) + _dag_with_same_id_in_different_repo = insert(:dag, dag_id: dag.dag_id, repo: ctx.repo_2) + job_id = insert(:job_id, environment_id: ctx.environment_1.id, dag_id: dag.dag_id) + + %{data: job} = + ctx.conn + |> get(build_path(ctx.account_1.id, job_id.id)) + |> json_response(200) + |> assert_schema("ShowJobResponse", api_spec()) + + assert job.id == job_id.id + assert job.dag_id == dag.dag_id + assert job.dag_id == job_id.dag_id + end + + test "loads the most recent job_run and most recent completed job_run", ctx do + dag = insert(:dag, repo: ctx.repo_1) + job_id = insert(:job_id, environment_id: ctx.environment_1.id, dag_id: dag.dag_id) + + most_recent_job_run = + insert(:dag_run, + dag_id: dag.dag_id, + state: :failed, + end_date: ~U[2023-01-01 12:00:00Z], + repo: ctx.repo_1 + ) + + most_recent_completed_job_run = + insert(:dag_run, + dag_id: dag.dag_id, + state: :success, + end_date: ~U[2023-01-01 11:00:00Z], + repo: ctx.repo_1 + ) + + _older_job_run = + insert(:dag_run, dag_id: dag.dag_id, end_date: ~U[2023-01-01 10:00:00Z], repo: ctx.repo_1) + + %{data: job} = + ctx.conn + |> get(build_path(ctx.account_1.id, job_id.id)) + |> json_response(200) + |> assert_schema("ShowJobResponse", api_spec()) + + assert job.most_recent_job_run.dag_run_id == most_recent_job_run.id + + assert job.most_recent_completed_job_run.dag_run_id == + most_recent_completed_job_run.id + end + + test "returns an error if a Job hasn't been fetched earlier and we don't have an ID -> JobId mapping", + ctx do + insert(:dag, repo: ctx.repo_1) + + %{errors: %{message: "Not Found"}} = + ctx.conn + |> get(build_path(ctx.account_1.id, 1)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + + test "returns an error if the job belongs to another account", ctx do + dag = insert(:dag, repo: ctx.repo_2) + job_id = insert(:job_id, environment_id: ctx.environment_2.id, dag_id: dag.dag_id) + + %{errors: %{message: "Not Found"}} = + ctx.conn + |> get(build_path(ctx.account_1.id, job_id.id)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + + test "returns an error if a job does not exist", ctx do + %{errors: %{message: "Not Found"}} = + ctx.conn + |> get(build_path(ctx.account_1.id, 404)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/job/job_json.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job/job_json.ex new file mode 100644 index 00000000..c7b9ddea --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job/job_json.ex @@ -0,0 +1,31 @@ +defmodule JadeWeb.API.V2.JobJSON do + @moduledoc """ + The Job JSON component. + + Renders one or multiple Jobs to a map. 
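+
+ A rough sketch of the output (field values are assumptions): the job struct is
+ flattened and its two run associations are rendered through
+ `JadeWeb.API.V2.JobRunJSON.build/1`, which returns `nil` for a missing run.
+
+     # hypothetical job with a preloaded most_recent_job_run
+     show(%{job: job})
+     #=> %{data: %{id: 1, dag_id: "daily_run",
+     #=>           most_recent_job_run: %{id: 10, ...},
+     #=>           most_recent_completed_job_run: nil}}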
+ """ + + alias JadeWeb.API.V2.JobRunJSON + + def index(%{jobs: jobs}) do + %{data: build(jobs)} + end + + def show(%{job: job}) do + %{data: build(job)} + end + + def build(jobs) when is_list(jobs) do + for job <- jobs, do: build(job) + end + + def build(job) do + job + |> Map.from_struct() + |> Map.put(:most_recent_job_run, JobRunJSON.build(job.most_recent_job_run)) + |> Map.put( + :most_recent_completed_job_run, + JobRunJSON.build(job.most_recent_completed_job_run) + ) + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/job_run/job_run_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job_run/job_run_controller.ex new file mode 100644 index 00000000..251446c0 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job_run/job_run_controller.ex @@ -0,0 +1,44 @@ +defmodule JadeWeb.API.V2.JobRunController do + use JadeWeb, :controller + + alias Jade.JobRuns.JobRunRepo + + plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true + + tags ["dbt-api"] + + operation(:index, + summary: "List all jobs of an account.", + parameters: [ + account_id: [in: :path, description: "Account ID", type: :integer, example: 1] + ], + responses: %{ + 200 => Generic.response(Schemas.ListJobRunsResponse), + 401 => Generic.not_found() + } + ) + + def index(conn, params) do + job_runs = JobRunRepo.list(params) + render(conn, :index, job_runs: job_runs) + end + + operation(:show, + summary: "Show a single job of an account.", + parameters: [ + account_id: [in: :path, description: "Account ID", type: :integer, example: 1], + id: [in: :path, description: "Job ID", type: :integer, example: 1] + ], + responses: [ + ok: Generic.response(Schemas.ShowJobRunResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def show(conn, %{id: job_run_id, account_id: account_id}) do + with {:ok, job_run} <- JobRunRepo.get(account_id, job_run_id) do + render(conn, :show, job_run: job_run) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/job_run/job_run_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job_run/job_run_controller_test.exs new file mode 100644 index 00000000..5e873e8a --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job_run/job_run_controller_test.exs @@ -0,0 +1,97 @@ +defmodule JadeWeb.API.V2.JobRunControllerTest do + use JadeWeb.ConnCase, async: false + + alias Jade.JobRunIds.JobRunId + + defp build_path(account_id, job_run_id \\ nil) do + "/api/v2/accounts/#{account_id}/runs" <> "/#{job_run_id}" + end + + setup %{conn: conn} do + attrs = insert_two_accounts_with_repos() + user = insert(:user) + + token = + insert_auth_token_for_user(user, attrs.account_1, attrs.environment_1, attrs.project_1) + + conn = put_bearer_token(conn, token.key) + + Map.merge(attrs, %{conn: conn, user: user, token: token}) + end + + describe "index/2" do + test "returns a list of job_runs", ctx do + dag_run_1 = insert(:dag_run, repo: ctx.repo_1) + dag_run_2 = insert(:dag_run, repo: ctx.repo_1) + _ignored_dag_run = insert(:dag_run, repo: ctx.repo_2) + + %{data: data} = + ctx.conn + |> get(build_path(ctx.account_1.id)) + |> json_response(200) + |> assert_schema("ListJobRunsResponse", api_spec()) + + assert length(data) == 2 + + job_run_ids = Jade.Repo.all(JobRunId) + assert_lists_equal(data, job_run_ids, &(&1.id == &2.id)) + + assert_lists_equal( + job_run_ids, + [dag_run_1, dag_run_2], + &(&1.dag_run_id == &2.id && &1.environment_id == ctx.environment_1.id) + ) + end + + test 
"returns an empty list if the account has no job_runs", ctx do + _ignored_dag = insert(:dag, repo: ctx.repo_2) + + %{data: []} = + ctx.conn + |> get(build_path(ctx.account_1.id)) + |> json_response(200) + |> assert_schema("ListJobRunsResponse", api_spec()) + end + + test "returns an error if the path and token account_id don't match", ctx do + another_token = insert_auth_token_for_user(insert(:user), ctx.account_2, nil, nil) + + assert %{errors: %{message: message}} = + ctx.conn + |> put_bearer_token(another_token.key) + |> get(build_path(ctx.account_1.id)) + |> json_response(401) + |> assert_schema("ErrorResponse", api_spec()) + + assert message == "Invalid Account in Path. You have no accces to this account." + end + end + + describe "show/2" do + test "returns a single job_run", ctx do + dag_run = insert(:dag_run, repo: ctx.repo_1) + _ignored_dag_run = insert(:dag_run, repo: ctx.repo_1) + _ignored_dag_run = insert(:dag_run, repo: ctx.repo_2) + _dag_run_with_same_id_in_different_repo = insert(:dag_run, id: dag_run.id, repo: ctx.repo_2) + + job_run_id = + insert(:job_run_id, environment_id: ctx.environment_1.id, dag_run_id: dag_run.id) + + %{data: job_run} = + ctx.conn + |> get(build_path(ctx.account_1.id, job_run_id.id)) + |> json_response(200) + |> assert_schema("ShowJobRunResponse", api_spec()) + + assert job_run.id == job_run_id.id + end + + test "returns an error if a job_run does not exist", ctx do + %{errors: %{message: "Not Found"}} = + ctx.conn + |> get(build_path(ctx.account_1.id, 404)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/job_run/job_run_json.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job_run/job_run_json.ex new file mode 100644 index 00000000..12991616 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/job_run/job_run_json.ex @@ -0,0 +1,27 @@ +defmodule JadeWeb.API.V2.JobRunJSON do + @moduledoc """ + The JobRun JSON component. + + Renders one or multiple JobRuns to a map. 
+ """ + + alias Jade.JobRuns.JobRun + + def index(%{job_runs: job_runs}) do + %{data: build(job_runs)} + end + + def show(%{job_run: job_run}) do + %{data: build(job_run)} + end + + def build(job_runs) when is_list(job_runs) do + for job_run <- job_runs, do: build(job_run) + end + + def build(%JobRun{} = job_run) do + job_run |> Map.from_struct() |> Map.delete(:job) + end + + def build(_job_run), do: nil +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/manifest/manifest_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/manifest/manifest_controller.ex new file mode 100644 index 00000000..2ca25203 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/manifest/manifest_controller.ex @@ -0,0 +1,30 @@ +defmodule JadeWeb.API.V2.ManifestController do + use JadeWeb, :controller + + alias Jade.Manifests.ManifestRepo + + plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true + + tags ["dbt-api"] + + operation(:show, + summary: "Return an artifact for a JobRun.", + parameters: [ + account_id: [in: :path, description: "Account ID", type: :integer, example: 1], + job_run_id: [in: :path, description: "JobRun ID", type: :integer, example: 1], + artifact: [in: :path, description: "Artifact type", type: :string, example: "manifest.json"] + ], + responses: [ + ok: Generic.response(Schemas.ShowArtifactResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def show(conn, %{account_id: account_id, job_run_id: job_run_id, artifact: "manifest.json"}) do + with {:ok, manifest} <- ManifestRepo.get_by(account_id: account_id, job_run_id: job_run_id), + {:ok, file} <- ManifestRepo.download_file(manifest) do + render(conn, file: file) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/manifest/manifest_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/v2/manifest/manifest_controller_test.exs new file mode 100644 index 00000000..5f2eaeb1 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/manifest/manifest_controller_test.exs @@ -0,0 +1,60 @@ +defmodule JadeWeb.ManifestControllerTest do + use JadeWeb.ConnCase, async: true + + defp build_path(account_id, job_run_id) do + ~p"/api/v2/accounts/#{account_id}/runs/#{job_run_id}/artifacts/manifest.json" + end + + setup %{conn: conn} do + user = insert(:user) + account = insert(:account) + token = insert_auth_token_for_user(user, account, nil, nil) + conn = put_bearer_token(conn, token.key) + %{conn: conn, account: account, token: token} + end + + describe "show" do + test "returns the manifest of a job_run", ctx do + job_run_id = insert(:job_run_id) + _manifest = insert(:manifest, account_id: ctx.account.id, job_run: job_run_id) + + res_manifest = + ctx.conn + |> get(build_path(ctx.account.id, job_run_id.id)) + |> json_response(200) + |> assert_schema("ShowArtifactResponse", api_spec()) + + assert res_manifest.data =~ + "test.balboa.dbt_utils_unique_combination_of_columns_country_populations_country_code__year.f0f4e51143" + end + + test "returns an error if the manifest belongs to another account", ctx do + job_run_id = insert(:job_run_id) + _manifest = insert(:manifest, job_run: job_run_id) + + %{errors: %{message: "Not Found"}} = + ctx.conn + |> get(build_path(ctx.account.id, job_run_id.id)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + + test "returns an error if no manifest exists for the job_run", ctx do + job_run_id = insert(:job_run_id) + + %{errors: %{message: "Not Found"}} 
= + ctx.conn + |> get(build_path(ctx.account.id, job_run_id.id)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + + test "returns an error if the job_run does not exist", ctx do + %{errors: %{message: "Not Found"}} = + ctx.conn + |> get(build_path(ctx.account.id, 1)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/manifest/manifest_json.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/manifest/manifest_json.ex new file mode 100644 index 00000000..a3bd9b34 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/manifest/manifest_json.ex @@ -0,0 +1,8 @@ +defmodule JadeWeb.API.V2.ManifestJSON do + @doc """ + Renders a single manifest. + """ + def show(%{file: file}) do + %{data: file} + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/latest_job_run_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/latest_job_run_controller.ex new file mode 100644 index 00000000..fefa15f9 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/latest_job_run_controller.ex @@ -0,0 +1,28 @@ +defmodule JadeWeb.API.V2.Project.LatestJobRunController do + use JadeWeb, :controller + + alias Jade.JobRuns.JobRunRepo + + plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true + + tags ["dbt-api"] + + operation(:show, + summary: "Show the latest, successful job run of a project.", + parameters: [ + account_id: [in: :path, description: "Account ID", type: :integer, example: 1], + project_id: [in: :path, description: "Project ID", type: :integer, example: 1] + ], + responses: [ + ok: Generic.response(Schemas.ShowJobRunResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def show(conn, params) do + with {:ok, job_run} <- JobRunRepo.get_latest_for_project(params) do + render(conn, :show, job_run: job_run) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/latest_job_run_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/latest_job_run_controller_test.exs new file mode 100644 index 00000000..8e2d5350 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/latest_job_run_controller_test.exs @@ -0,0 +1,57 @@ +defmodule JadeWeb.API.V2.Project.LatestJobRunControllerTest do + use JadeWeb.ConnCase, async: false + + defp build_path(account_id, project_id) do + "/api/v2/accounts/#{account_id}/projects/#{project_id}/latest-run" + end + + setup %{conn: conn} do + attrs = insert_two_accounts_with_repos() + user = insert(:user) + + token = + insert_auth_token_for_user(user, attrs.account_1, attrs.environment_1, attrs.project_1) + + conn = put_bearer_token(conn, token.key) + + Map.merge(attrs, %{conn: conn, user: user, token: token}) + end + + describe "show/2" do + test "returns the latest job run for a project", ctx do + insert(:dag_run, repo: ctx.repo_1, state: :success, end_date: ~U[2020-01-01 12:00:00Z]) + + latest_dag_run = + insert(:dag_run, repo: ctx.repo_1, state: :success, end_date: ~U[2020-01-02 12:00:00Z]) + + # Ignored DagRuns + insert(:dag_run, repo: ctx.repo_1, state: :success, end_date: nil) + insert(:dag_run, repo: ctx.repo_1, state: :failed, end_date: ~U[2020-01-01 10:00:00Z]) + insert(:dag_run, repo: ctx.repo_2, state: :success, end_date: ~U[2020-01-01 10:00:00Z]) + + %{data: job_run} = + ctx.conn + |> get(build_path(ctx.account_1.id, ctx.project_1.id)) + |> 
json_response(200) + |> assert_schema("ShowJobRunResponse", api_spec()) + + assert job_run.environment_slug == ctx.environment_1.slug + assert job_run.dag_id == latest_dag_run.dag_id + assert job_run.dag_run_id == latest_dag_run.id + assert job_run.dag_run_run_id == latest_dag_run.run_id + assert job_run.account_id == ctx.account_1.id + assert job_run.project_id == ctx.project_1.id + end + + test "returns 404 if no successful dag runs exist", ctx do + insert(:dag_run, repo: ctx.repo_1, state: :failed, end_date: ~U[2020-01-01 12:00:00Z]) + insert(:dag_run, repo: ctx.repo_1, state: :success, end_date: nil) + + %{errors: %{message: "Not Found"}} = + ctx.conn + |> get(build_path(ctx.account_1.id, ctx.project_1.id)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/latest_job_run_json.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/latest_job_run_json.ex new file mode 100644 index 00000000..a06e8f41 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/latest_job_run_json.ex @@ -0,0 +1,15 @@ +defmodule JadeWeb.API.V2.Project.LatestJobRunJSON do + @moduledoc """ + The latest JobRun JSON component. + + Renders the latest JobRun for a given Project. + """ + + def show(%{job_run: job_run}) do + %{data: data(job_run)} + end + + def data(job_run) do + job_run |> Map.from_struct() |> Map.delete(:job) + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/project_controller.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/project_controller.ex new file mode 100644 index 00000000..13d1f93f --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/project_controller.ex @@ -0,0 +1,46 @@ +defmodule JadeWeb.API.V2.ProjectController do + use JadeWeb, :controller + + alias Jade.Projects.ProjectRepo + + plug OpenApiSpex.Plug.CastAndValidate, json_render_error_v2: true + + tags ["dbt-api"] + + operation(:index, + summary: "List all projects of an account.", + parameters: [ + account_id: [in: :path, description: "Account ID", type: :integer, example: 1], + limit: Pagination.limit(), + offset: Pagination.offset() + ], + responses: [ + ok: Generic.response(Schemas.ListProjectsResponse), + unauthorized: Generic.unauthorized() + ] + ) + + def index(conn, params) do + projects = ProjectRepo.list(params) + render(conn, :index, projects: projects) + end + + operation(:show, + summary: "Show a single project of an account.", + parameters: [ + account_id: [in: :path, description: "Account ID", type: :integer, example: 1], + id: [in: :path, description: "Project ID", type: :integer, example: 1] + ], + responses: [ + ok: Generic.response(Schemas.ShowProjectResponse), + unauthorized: Generic.unauthorized(), + not_found: Generic.not_found() + ] + ) + + def show(conn, %{id: project_id, account_id: account_id}) do + with {:ok, project} <- ProjectRepo.get_by(id: project_id, account_id: account_id) do + render(conn, :show, project: project) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/project_controller_test.exs b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/project_controller_test.exs new file mode 100644 index 00000000..57113c1c --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/project_controller_test.exs @@ -0,0 +1,94 @@ +defmodule JadeWeb.API.V2.ProjectControllerTest do + use JadeWeb.ConnCase, async: true + + defp build_path(account_id, 
project_id \\ nil) do + ~p"/api/v2/accounts/#{account_id}/projects" <> "/#{project_id}" + end + + setup %{conn: conn} do + user = insert(:user) + account = insert(:account) + token = insert_auth_token_for_user(user, account, nil, nil) + conn = put_bearer_token(conn, token.key) + %{conn: conn, user: user, account: account, token: token} + end + + describe "index/2" do + test "returns a list of projects", ctx do + insert_pair(:project, account: ctx.account) + + %{data: data} = + ctx.conn + |> get(build_path(ctx.account.id)) + |> json_response(200) + |> assert_schema("ListProjectsResponse", api_spec()) + + assert length(data) == 2 + end + + test "returns an error if the path and token account_id don't match", ctx do + insert_pair(:project, account: ctx.account) + another_account = insert(:account) + another_token = insert_auth_token_for_user(insert(:user), another_account, nil, nil) + + assert %{errors: %{message: message}} = + ctx.conn + |> put_bearer_token(another_token.key) + |> get(build_path(ctx.account.id)) + |> json_response(401) + |> assert_schema("ErrorResponse", api_spec()) + + assert message == "Invalid Account in Path. You have no access to this account." + end + + test "paginates the response", ctx do + insert_list(3, :project, account: ctx.account) + params = %{limit: 2, offset: 2} + + %{data: data} = + ctx.conn + |> get(build_path(ctx.account.id), params) + |> json_response(200) + |> assert_schema("ListProjectsResponse", api_spec()) + + assert length(data) == 1 + end + end + + describe "show/2" do + test "returns a single project", ctx do + project = insert(:project, account: ctx.account) + + %{data: res_project} = + ctx.conn + |> get(build_path(ctx.account.id, project.id)) + |> json_response(200) + |> assert_schema("ShowProjectResponse", api_spec()) + + assert res_project.id == project.id + end + + test "returns an error if the project belongs to another account", ctx do + another_account = insert(:account) + project = insert(:project, account: another_account) + + %{errors: %{message: "Not Found"}} = + ctx.conn + |> get(build_path(ctx.account.id, project.id)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + + test "returns an error if a project does not exist", ctx do + %{ + errors: %{ + message: "Not Found" + } + } = + ctx.conn + |> get(build_path(ctx.account.id, 404)) + |> json_response(404) + |> assert_schema("ErrorResponse", api_spec()) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/project_json.ex b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/project_json.ex new file mode 100644 index 00000000..94da566c --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/api/v2/project/project_json.ex @@ -0,0 +1,23 @@ +defmodule JadeWeb.API.V2.ProjectJSON do + @moduledoc """ + The Project JSON component. + + Renders one or multiple Projects to a map.
+ """ + + def index(%{projects: projects}) do + %{data: data(projects)} + end + + def show(%{project: project}) do + %{data: data(project)} + end + + defp data(projects) when is_list(projects) do + for project <- projects, do: data(project) + end + + defp data(project) do + Map.from_struct(project) + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/changeset_json.ex b/src/core/dbt-api/lib/jade_web/controllers/changeset_json.ex new file mode 100644 index 00000000..88916463 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/changeset_json.ex @@ -0,0 +1,25 @@ +defmodule JadeWeb.ChangesetJSON do + @doc """ + Renders changeset errors. + """ + def error(%{changeset: changeset}) do + # When encoded, the changeset returns its errors + # as a JSON object. So we just pass it forward. + %{errors: Ecto.Changeset.traverse_errors(changeset, &translate_error/1)} + end + + defp translate_error({msg, opts}) do + # You can make use of gettext to translate error messages by + # uncommenting and adjusting the following code: + + # if count = opts[:count] do + # Gettext.dngettext(JadeWeb.Gettext, "errors", msg, msg, count, opts) + # else + # Gettext.dgettext(JadeWeb.Gettext, "errors", msg, opts) + # end + + Enum.reduce(opts, msg, fn {key, value}, acc -> + String.replace(acc, "%{#{key}}", fn _ -> to_string(value) end) + end) + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/error_html.ex b/src/core/dbt-api/lib/jade_web/controllers/error_html.ex new file mode 100644 index 00000000..cc537310 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/error_html.ex @@ -0,0 +1,19 @@ +defmodule JadeWeb.ErrorHTML do + use JadeWeb, :html + + # If you want to customize your error pages, + # uncomment the embed_templates/1 call below + # and add pages to the error directory: + # + # * lib/jade_web/controllers/error_html/404.html.heex + # * lib/jade_web/controllers/error_html/500.html.heex + # + # embed_templates "error_html/*" + + # The default is to render a plain text page based on + # the template name. For example, "404.html" becomes + # "Not Found". + def render(template, _assigns) do + Phoenix.Controller.status_message_from_template(template) + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/error_html_test.exs b/src/core/dbt-api/lib/jade_web/controllers/error_html_test.exs new file mode 100644 index 00000000..ed1d1ede --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/error_html_test.exs @@ -0,0 +1,14 @@ +defmodule JadeWeb.ErrorHTMLTest do + use JadeWeb.ConnCase, async: true + + # Bring render_to_string/4 for testing custom views + import Phoenix.Template + + test "renders 404.html" do + assert render_to_string(JadeWeb.ErrorHTML, "404", "html", []) == "Not Found" + end + + test "renders 500.html" do + assert render_to_string(JadeWeb.ErrorHTML, "500", "html", []) == "Internal Server Error" + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/error_json.ex b/src/core/dbt-api/lib/jade_web/controllers/error_json.ex new file mode 100644 index 00000000..06f2789e --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/error_json.ex @@ -0,0 +1,28 @@ +defmodule JadeWeb.ErrorJSON do + # If you want to customize a particular status code, + # you may add your own clauses, such as: + # + # def render("500.json", _assigns) do + # %{errors: %{message: "Internal Server Error"}} + # end + + # By default, Phoenix returns the status message from + # the template name. For example, "404.json" becomes + # "Not Found". 
+ def render(template, assigns) do + message = assigns[:message] || Phoenix.Controller.status_message_from_template(template) + build_errors(message) + end + + @doc """ + A small helper function for formatting the json error message. + This is used in Plugs to put the correct error response body. + """ + def send_json(conn, message) do + Phoenix.Controller.json(conn, build_errors(message)) + end + + defp build_errors(message) do + %{errors: %{message: message}} + end +end diff --git a/src/core/dbt-api/lib/jade_web/controllers/error_json_test.exs b/src/core/dbt-api/lib/jade_web/controllers/error_json_test.exs new file mode 100644 index 00000000..7e0d742c --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/controllers/error_json_test.exs @@ -0,0 +1,12 @@ +defmodule JadeWeb.ErrorJSONTest do + use JadeWeb.ConnCase, async: true + + test "renders 404" do + assert JadeWeb.ErrorJSON.render("404.json", %{}) == %{errors: %{message: "Not Found"}} + end + + test "renders 500" do + assert JadeWeb.ErrorJSON.render("500.json", %{}) == + %{errors: %{message: "Internal Server Error"}} + end +end diff --git a/src/core/dbt-api/lib/jade_web/endpoint.ex b/src/core/dbt-api/lib/jade_web/endpoint.ex new file mode 100644 index 00000000..3d9d8622 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/endpoint.ex @@ -0,0 +1,54 @@ +defmodule JadeWeb.Endpoint do + use Phoenix.Endpoint, otp_app: :jade + + # The session will be stored in the cookie and signed, + # this means its contents can be read but not tampered with. + # Set :encryption_salt if you would also like to encrypt it. + @session_options [ + store: :cookie, + key: "_jade_key", + signing_salt: "VrWoHGn+", + same_site: "Lax" + ] + + # Disable LiveView until we need it. + # socket "/live", Phoenix.LiveView.Socket, websocket: [connect_info: [session: @session_options]] + + # Serve at "/" the static files from "priv/static" directory. + # + # You should set gzip to true if you are running phx.digest + # when deploying your static files in production. + plug Plug.Static, + at: "/", + from: :jade, + gzip: false, + only: JadeWeb.static_paths() + + # Code reloading can be explicitly enabled under the + # :code_reloader configuration of your endpoint. + if code_reloading? do + socket "/phoenix/live_reload/socket", Phoenix.LiveReloader.Socket + plug Phoenix.LiveReloader + plug Phoenix.CodeReloader + plug Phoenix.Ecto.CheckRepoStatus, otp_app: :jade + end + + plug Phoenix.LiveDashboard.RequestLogger, + param_key: "request_logger", + cookie_key: "request_logger" + + plug Plug.RequestId + plug Plug.Telemetry, event_prefix: [:phoenix, :endpoint] + + plug Plug.Parsers, + parsers: [:urlencoded, :multipart, :json], + # Max file size for uploads: 2 GB + length: 2_000_000_000, + pass: ["*/*"], + json_decoder: Phoenix.json_library() + + plug Plug.MethodOverride + plug Plug.Head + plug Plug.Session, @session_options + plug JadeWeb.Router +end diff --git a/src/core/dbt-api/lib/jade_web/gettext.ex b/src/core/dbt-api/lib/jade_web/gettext.ex new file mode 100644 index 00000000..b50b94cd --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/gettext.ex @@ -0,0 +1,24 @@ +defmodule JadeWeb.Gettext do + @moduledoc """ + A module providing Internationalization with a gettext-based API. 
+ + By using [Gettext](https://hexdocs.pm/gettext), + your module gains a set of macros for translations, for example: + + use Gettext, backend: JadeWeb.Gettext + + # Simple translation + gettext("Here is the string to translate") + + # Plural translation + ngettext("Here is the string to translate", + "Here are the strings to translate", + 3) + + # Domain-based translation + dgettext("errors", "Here is the error message to translate") + + See the [Gettext Docs](https://hexdocs.pm/gettext) for detailed usage. + """ + use Gettext.Backend, otp_app: :jade +end diff --git a/src/core/dbt-api/lib/jade_web/open_api/generic.ex b/src/core/dbt-api/lib/jade_web/open_api/generic.ex new file mode 100644 index 00000000..23a4f144 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/open_api/generic.ex @@ -0,0 +1,67 @@ +defmodule JadeWeb.OpenApi.Generic do + alias OpenApiSpex.Schema + + alias JadeWeb.OpenApi.Schemas + + # Schema types + + def integer(opts \\ []), do: to_schema([type: :integer] ++ opts) + def string(opts \\ []), do: to_schema([type: :string] ++ opts) + def boolean(opts \\ []), do: to_schema([type: :boolean] ++ opts) + + def datetime(opts \\ []), do: to_schema([type: :string, format: :"date-time"] ++ opts) + + def datetime_unix(opts \\ []) do + to_schema([type: :integer, description: "Unix Timestamp in Seconds"] ++ opts) + end + + def map(opts \\ []) do + to_schema([type: :object] ++ opts) + end + + def array_of(schema, opts \\ []) do + opts = [type: :array, items: schema] ++ opts + to_schema(opts) + end + + def one_of(schemas, _opts \\ []) do + %OpenApiSpex.Schema{ + oneOf: schemas + } + end + + def enum(values, opts \\ []) do + opts = [type: :string, enum: values] ++ opts + to_schema(opts) + end + + def nullable(module) do + %OpenApiSpex.Schema{ + nullable: true, + allOf: [module] + } + end + + defp to_schema(opts) do + # Make all fields nullable by default. 
+ opts = Keyword.put_new(opts, :nullable, true) + struct(Schema, opts) + end + + # Responses + + def response(schema, name \\ nil) + + def response(schema, nil) do + name = schema |> to_string() |> String.split(".") |> List.last() + {name, "application/json", schema} + end + + def response(schema, name) do + {name, "application/json", schema} + end + + def ok(), do: response(Schemas.SuccessResponse, "SuccessResponse") + def not_found(), do: response(Schemas.ErrorResponse, "NotFoundError") + def unauthorized(), do: response(Schemas.ErrorResponse, "UnauthorizedError") +end diff --git a/src/core/dbt-api/lib/jade_web/open_api/schemas.ex b/src/core/dbt-api/lib/jade_web/open_api/schemas.ex new file mode 100644 index 00000000..ca54f5cd --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/open_api/schemas.ex @@ -0,0 +1,523 @@ +defmodule JadeWeb.OpenApi.Schemas do + # Accounts + + defmodule Account do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + description: "An account", + type: :object, + additionalProperties: false, + properties: %{ + id: Generic.integer(), + name: Generic.string(), + state: Generic.integer(), + plan: Generic.string(), + pending_cancel: Generic.boolean(), + run_slots: Generic.integer(), + developer_seats: Generic.integer(), + it_seats: Generic.integer(), + read_only_seats: Generic.integer(), + pod_memory_request_mebibytes: Generic.integer(), + run_duration_limit_seconds: Generic.integer(), + queue_limit: Generic.integer(), + stripe_customer_id: Generic.integer(), + metronome_customer_id: Generic.integer(), + salesforce_customer_id: Generic.integer(), + third_party_billing: Generic.boolean(), + billing_email_address: Generic.string(), + locked: Generic.boolean(), + lock_reason: Generic.string(), + lock_cause: Generic.string(), + develop_file_system: Generic.boolean(), + unlocked_at: Generic.datetime(), + unlock_if_subscription_renewed: Generic.boolean(), + enterprise_authentication_method: Generic.string(), + enterprise_login_slug: Generic.string(), + enterprise_unique_identifier: Generic.string(), + business_critical: Generic.boolean(), + created_at: Generic.datetime(), + updated_at: Generic.datetime(), + starter_repo_url: Generic.string(), + git_auth_level: Generic.string(), + identifier: Generic.string(), + trial_end_date: Generic.datetime(), + static_subdomain: Generic.string(), + run_locked_until: Generic.datetime(), + docs_job_id: Generic.integer(), + freshness_job_id: Generic.integer() + } + }) + end + + defmodule ShowAccountResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.Account + + OpenApiSpex.schema(%{ + type: :object, + properties: %{ + data: Account + } + }) + end + + # Projects + + defmodule Project do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + description: "An account's project", + type: :object, + additionalProperties: false, + properties: %{ + id: Generic.integer(nullable: false), + name: Generic.string(), + slug: Generic.string(), + account_id: Generic.integer(nullable: false), + connection_id: Generic.integer(), + repository_id: Generic.integer(), + semantic_layer_id: Generic.integer(), + integration_entity_id: Generic.integer(), + skipped_setup: Generic.boolean(), + state: Generic.integer(), + dbt_project_subdirectory: Generic.string(), + docs_job_id: Generic.integer(), + freshness_job_id: Generic.integer(), + created_at: Generic.datetime(), + updated_at: Generic.datetime() + } + }) + end + + defmodule ListProjectsResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.Project + 
OpenApiSpex.schema(%{ + type: :object, + properties: %{ + data: Generic.array_of(Project) + } + }) + end + + defmodule ShowProjectResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.Project + + OpenApiSpex.schema(%{ + type: :object, + properties: %{ + data: Project + } + }) + end + + # Environments + + defmodule Environment do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + description: "An account's environment", + type: :object, + additionalProperties: false, + properties: %{ + id: Generic.integer(nullable: false), + project_id: Generic.integer(nullable: false), + account_id: Generic.integer(), + connection_id: Generic.integer(), + credentials_id: Generic.integer(), + created_by_id: Generic.integer(), + extended_attributes_id: Generic.integer(), + repository_id: Generic.integer(), + name: Generic.string(), + slug: Generic.string(), + dbt_project_subdirectory: Generic.string(), + use_custom_branch: Generic.boolean(), + custom_branch: Generic.string(), + dbt_version: Generic.string(), + raw_dbt_version: Generic.string(), + supports_docs: Generic.boolean(), + state: Generic.integer(), + custom_environment_variables: Generic.string(), + created_at: Generic.datetime(), + updated_at: Generic.datetime() + } + }) + end + + defmodule ListEnvironmentsResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.Environment + + OpenApiSpex.schema(%{ + type: :object, + properties: %{ + data: Generic.array_of(Environment) + } + }) + end + + defmodule ShowEnvironmentResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.Environment + + OpenApiSpex.schema(%{ + type: :object, + properties: %{ + data: Environment + } + }) + end + + # Job Runs + + defmodule JobRun do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + description: "A Run of a Job", + type: :object, + additionalProperties: false, + properties: %{ + id: Generic.integer(), + dbt_job_run_ext_id: Generic.integer(), + job_id: Generic.integer(), + dag_id: Generic.string(), + dag_run_id: Generic.integer(), + dag_run_run_id: Generic.string(), + trigger_id: Generic.integer(), + environment_id: Generic.integer(), + environment_slug: Generic.string(), + account_id: Generic.integer(), + project_id: Generic.integer(), + job_definition_id: Generic.string(), + status: Generic.integer(description: "1: Success. 2: Error. 3: Queued. 
5: Running."), + dbt_version: Generic.string(), + git_branch: Generic.string(), + git_sha: Generic.string(), + status_message: Generic.string(), + owner_thread_id: Generic.string(), + executed_by_thread_id: Generic.string(), + deferring_run_id: Generic.integer(), + artifacts_saved: Generic.boolean(), + artifact_s3_path: Generic.string(), + has_docs_generated: Generic.boolean(), + has_sources_generated: Generic.boolean(), + notifications_sent: Generic.boolean(), + blocked_by: Generic.array_of(Generic.integer()), + scribe_enabled: Generic.boolean(), + completed_at: Generic.datetime(), + created_at: Generic.datetime(), + updated_at: Generic.datetime(), + queued_at: Generic.datetime(), + dequeued_at: Generic.datetime(), + started_at: Generic.datetime(), + finished_at: Generic.datetime(), + last_checked_at: Generic.datetime(), + last_heartbeat_at: Generic.datetime(), + should_start_at: Generic.datetime(), + status_humanized: Generic.string(), + in_progress: Generic.boolean(), + is_complete: Generic.boolean(), + is_success: Generic.boolean(), + is_error: Generic.boolean(), + is_cancelled: Generic.boolean(), + is_running: Generic.boolean(), + duration: Generic.integer(description: "The queued_at until finished_at duration in seconds."), + queued_duration: Generic.integer(description: "The queued_at until started_at duration in seconds."), + run_duration: Generic.integer(description: "The started_at until finished_at duration in seconds"), + duration_humanized: Generic.string(), + queued_duration_humanized: Generic.string(), + run_duration_humanized: Generic.string(), + created_at_humanized: Generic.string(), + finished_at_humanized: Generic.string() + } + }) + end + + defmodule ListJobRunsResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.JobRun + + OpenApiSpex.schema(%{ + type: :object, + properties: %{ + data: Generic.array_of(JobRun) + } + }) + end + + defmodule ShowJobRunResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.JobRun + + OpenApiSpex.schema(%{ + type: :object, + properties: %{ + data: JobRun + } + }) + end + + # Jobs + + defmodule Job do + use JadeWeb, :openapi_schema + + alias Jade.Jobs.Job + + OpenApiSpex.schema(%{ + description: "A Job of a Project", + type: :object, + additionalProperties: false, + properties: %{ + id: Generic.integer(nullable: false), + project_id: Generic.integer(), + dbt_job_ext_id: Generic.integer(), + environment_id: Generic.integer(), + dag_id: Generic.string(), + deferring_job_definition_id: Generic.integer(), + deferring_environment_id: Generic.integer(), + lifecycle_webhooks: Generic.boolean(), + lifecycle_webhooks_url: Generic.string(), + account_id: Generic.integer(), + name: Generic.string(), + description: Generic.string(), + dbt_version: Generic.string(), + raw_dbt_version: Generic.string(), + triggers: Generic.enum(Job.valid_triggers()), + created_at: Generic.datetime(), + updated_at: Generic.datetime(), + schedule: Generic.string(), + settings: Generic.map(), + execution: Generic.map(), + state: Generic.integer(), + generate_docs: Generic.boolean(), + run_generate_sources: Generic.boolean(), + most_recent_completed_run: Generic.integer(), + most_recent_run: Generic.integer(), + is_deferrable: Generic.boolean(), + deactivated: Generic.boolean(), + run_failure_count: Generic.integer(), + job_type: Generic.string(), + triggers_on_draft_pr: Generic.boolean(), + most_recent_job_run: Generic.nullable(JobRun), + most_recent_completed_job_run: Generic.nullable(JobRun) + } + }) + end + + defmodule 
ListJobsResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.Job + + OpenApiSpex.schema(%{ + type: :object, + properties: %{ + data: Generic.array_of(Job) + } + }) + end + + defmodule ShowJobResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.Job + + OpenApiSpex.schema(%{ + type: :object, + properties: %{ + data: Job + } + }) + end + + # File + + defmodule FileContent do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + description: "A File content.", + type: :string, + format: :binary + }) + end + + defmodule FileUpload do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + description: "An upload of one multipart/form-data file.", + type: :object, + additionalProperties: false, + properties: %{ + tag: Generic.string(), + file: FileContent + } + }) + end + + defmodule MultipleFilesUpload do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + description: "An upload of multiple multipart/form-data files.", + type: :object, + additionalProperties: true, + properties: %{ + files: Generic.array_of(FileUpload) + } + }) + end + + defmodule OneOrMultipleFileUploads do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + description: "An upload of one or multiple multipart/form-data files.", + oneOf: [ + FileUpload, + MultipleFilesUpload + ] + }) + end + + defmodule File do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + description: "A Datacoves File.", + type: :object, + additionalProperties: false, + properties: %{ + id: Generic.integer(nullable: false), + tag: Generic.string(), + filename: Generic.string(), + environment_slug: Generic.string(), + path: Generic.string(), + inserted_at: Generic.datetime(), + updated_at: Generic.datetime() + } + }) + end + + defmodule FileResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.File + + OpenApiSpex.schema(%{ + description: "A File response.", + type: :object, + properties: %{ + data: File + } + }) + end + + defmodule ListFilesResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.File + + OpenApiSpex.schema(%{ + description: "A list of Files response.", + type: :object, + properties: %{ + data: Generic.array_of(File) + } + }) + end + + defmodule FileOrFilesResponse do + use JadeWeb, :openapi_schema + + alias JadeWeb.OpenApi.Schemas.FileResponse + alias JadeWeb.OpenApi.Schemas.ListFilesResponse + + OpenApiSpex.schema(%{ + description: "A file or list of files response.", + type: :object, + properties: %{ + data: Generic.one_of([FileResponse, ListFilesResponse]) + } + }) + end + + # Artifacts + + defmodule ShowArtifactResponse do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + description: "An Artifact for a JobRun.
Returns the artifact content only.", + type: :object, + additionalProperties: false, + properties: %{ + data: FileContent + } + }) + end + + # Generic Responses + + defmodule SuccessResponse do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + type: :string + }) + end + + defmodule ErrorResponse do + use JadeWeb, :openapi_schema + + OpenApiSpex.schema(%{ + type: :object, + additionalProperties: false, + properties: %{ + errors: %Schema{ + type: :object, + properties: %{ + message: Generic.string(nullable: false) + } + } + } + }) + end + + # Pagination Parameters + + defmodule Pagination do + def limit() do + [ + in: :query, + description: "Limits the number of items in the response.", + type: :integer, + example: 20 + ] + end + + def offset() do + [ + in: :query, + description: "Offsets the primary key of the items in the response.", + type: :integer, + example: 20 + ] + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/open_api/spec.ex b/src/core/dbt-api/lib/jade_web/open_api/spec.ex new file mode 100644 index 00000000..fb021344 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/open_api/spec.ex @@ -0,0 +1,37 @@ +defmodule JadeWeb.OpenApi.Spec do + alias OpenApiSpex.{Components, Info, SecurityScheme, OpenApi, Paths, Server} + alias JadeWeb.{Endpoint, Router} + @behaviour OpenApi + + @impl OpenApi + def spec do + server_url = build_server_url() + + %OpenApi{ + servers: [ + # Populate the Server info from a phoenix endpoint + %Server{url: server_url} + ], + info: %Info{ + title: "Jade", + version: Jade.MixProject.version() + }, + components: %Components{ + securitySchemes: %{"authorization" => %SecurityScheme{type: "http", scheme: "bearer"}} + }, + security: [%{"authorization" => []}], + # Populate the paths from a phoenix router + paths: Paths.from_router(Router) + } + # Discover request/response schemas from path specs + |> OpenApiSpex.resolve_schema_modules() + end + + defp build_server_url() do + uri = Endpoint.struct_url() + path = Endpoint.path("") || "/" + host = "dbt." <> uri.host + uri = %{uri | path: path, host: host} + URI.to_string(uri) + end +end diff --git a/src/core/dbt-api/lib/jade_web/plugs/authenticate_api_key.ex b/src/core/dbt-api/lib/jade_web/plugs/authenticate_api_key.ex new file mode 100644 index 00000000..5120813a --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/plugs/authenticate_api_key.ex @@ -0,0 +1,98 @@ +defmodule JadeWeb.Plugs.AuthenticateApiKey do + @moduledoc """ + Verifies a request made by a Service Account like e.g. the Airflow Service Account + which automatically uploads the manifest.json to Jade after a DAGRun completes. + + This plug takes a bearer token, compares it with a Django AuthToken in the Datacoves + database, and checks that the user associated with the token has the correct permission + to upload manifests to the environment specified in the parameters. + """ + + require Logger + + alias Jade.Auth + + alias JadeWeb.API.FallbackController + alias JadeWeb.Plugs.Utils + + def init(opts), do: opts + + def call(conn, _opts) do + with {:ok, bearer_token} <- Utils.get_bearer_token(conn), + {:ok, api_key_details} <- Auth.fetch_api_key_details(bearer_token), + :ok <- check_permission(conn, api_key_details) do + conn + else + {:error, :missing_api_key} -> + message = "Missing API Key. Please create a Bearer Token first." + FallbackController.unauthenticated(conn, message) + + {:error, :invalid_api_key} -> + message = "Invalid API Key. Please use a valid Bearer Token." 
+ FallbackController.unauthenticated(conn, message) + + {:error, :api_key_not_found} -> + message = "Invalid API Key. Please use a valid Bearer Token." + FallbackController.unauthenticated(conn, message) + + {:error, :no_account_permission} -> + message = "Invalid Account in Path. You have no access to this account." + FallbackController.unauthenticated(conn, message) + + {:error, :no_project_permission} -> + message = "Invalid Project in Path. You have no access to this project." + FallbackController.unauthenticated(conn, message) + + {:error, :no_environment_permission} -> + message = "Invalid Environment in Path. You have no access to this environment." + FallbackController.unauthenticated(conn, message) + + {:error, :missing_path_parameter} -> + message = "Invalid Path. You requested a non-existent path." + FallbackController.unauthenticated(conn, message) + end + end + + defp check_permission( + %{params: %{"account_id" => account_id}} = _conn, + %{"account_ids" => account_ids} = _api_key_details + ) do + with {account_id, ""} <- Integer.parse(account_id), + true <- account_id in account_ids do + :ok + else + _error -> + {:error, :no_account_permission} + end + end + + defp check_permission( + %{params: %{"project_slug" => project_slug}} = _conn, + %{"projects" => project_slugs} = _api_key_details + ) do + if project_slug in project_slugs do + :ok + else + {:error, :no_project_permission} + end + end + + defp check_permission( + %{params: %{"environment_slug" => environment_slug}} = _conn, + %{"environments" => environment_slugs} = _api_key_details + ) do + if environment_slug in environment_slugs do + :ok + else + {:error, :no_environment_permission} + end + end + + defp check_permission(conn, api_key_details) do + Logger.error( + "AuthenticateApiKey missed a path parameter or api key details: #{inspect(conn.params)} - #{inspect(api_key_details)}" + ) + + {:error, :missing_path_parameter} + end +end diff --git a/src/core/dbt-api/lib/jade_web/plugs/authenticate_api_key_test.exs b/src/core/dbt-api/lib/jade_web/plugs/authenticate_api_key_test.exs new file mode 100644 index 00000000..bcc0ed6f --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/plugs/authenticate_api_key_test.exs @@ -0,0 +1,79 @@ +defmodule JadeWeb.Plugs.AuthenticateApiKeyTest do + use JadeWeb.ConnCase, async: true + + alias JadeWeb.Plugs.AuthenticateApiKey + + import ExUnit.CaptureLog + + setup do + user = insert(:user, is_service_account: true) + environment = insert(:environment, slug: "env123") + auth_token = insert_auth_token_for_user(user, nil, environment, nil) + + %{user: user, auth_token: auth_token} + end + + test "allows a conn with valid bearer token to pass", %{auth_token: auth_token} do + conn = + build_conn(:get, ~p"/api/v2/datacoves/manifests", %{"environment_slug" => "env123"}) + |> put_bearer_token(auth_token.key) + |> AuthenticateApiKey.call([]) + + refute conn.halted + end + + test "halts if the user of the bearer token has no permissions at all" do + user = insert(:user, is_service_account: true) + auth_token = insert(:auth_token, user: user) + + conn = + build_conn(:get, ~p"/api/v2/datacoves/manifests", %{"environment_slug" => "env123"}) + |> put_bearer_token(auth_token.key) + |> AuthenticateApiKey.call([]) + + assert conn.halted + end + + test "halts if the user of the bearer token has no matching permission" do + user = insert(:user, is_service_account: true) + auth_token = insert(:auth_token, user: user) + + conn = + build_conn(:get, ~p"/api/v2/datacoves/manifests", %{"environment_slug" =>
"env123"}) + |> put_bearer_token(auth_token.key) + |> AuthenticateApiKey.call([]) + + assert conn.halted + end + + test "halts the conn and writes a log if an invalid internal bearer token was given", %{ + conn: conn + } do + assert capture_log(fn -> + conn = + conn + |> put_bearer_token("invalid-token") + |> AuthenticateApiKey.call([]) + + assert conn.halted + + assert %{"errors" => %{"message" => message}} = Jason.decode!(conn.resp_body) + assert message == "Invalid API Key. Please use a valid Bearer Token." + end) =~ "Verifying ApiKey returned 401 with \"Invalid token\"" + end + + test "halts the conn if no bearer token was given", %{conn: conn} do + conn = AuthenticateApiKey.call(conn, []) + assert conn.halted + end + + test "halts the conn if no path arameter was given", %{conn: conn} do + user = insert(:user, is_service_account: true) + auth_token = insert(:auth_token, user: user) + + assert capture_log(fn -> + conn = conn |> put_bearer_token(auth_token.key) |> AuthenticateApiKey.call([]) + assert conn.halted + end) =~ "AuthenticateApiKey missed a path parameter or api key details" + end +end diff --git a/src/core/dbt-api/lib/jade_web/plugs/utils.ex b/src/core/dbt-api/lib/jade_web/plugs/utils.ex new file mode 100644 index 00000000..1f70e49b --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/plugs/utils.ex @@ -0,0 +1,20 @@ +defmodule JadeWeb.Plugs.Utils do + @moduledoc """ + Holds utility functions for custom Plugs. + """ + + import Plug.Conn + + def get_bearer_token(conn) do + authorization_header = + conn + |> get_req_header("authorization") + |> List.first() + + case authorization_header do + "Bearer " <> token -> {:ok, String.trim(token)} + "Token " <> token -> {:ok, String.trim(token)} + _ -> {:error, :missing_api_key} + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/router.ex b/src/core/dbt-api/lib/jade_web/router.ex new file mode 100644 index 00000000..31dfb478 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/router.ex @@ -0,0 +1,108 @@ +defmodule JadeWeb.Router do + use JadeWeb, :router + + pipeline :browser do + plug(:accepts, ["html"]) + plug(:fetch_session) + plug(:fetch_live_flash) + plug(:put_root_layout, html: {JadeWeb.Layouts, :root}) + plug(:protect_from_forgery) + plug(:put_secure_browser_headers) + end + + pipeline :api do + plug(:accepts, ["json"]) + plug OpenApiSpex.Plug.PutApiSpec, module: JadeWeb.OpenApi.Spec + end + + scope "/api/v2" do + pipe_through(:api) + + scope "/accounts/:account_id", JadeWeb.API.V2 do + pipe_through(JadeWeb.Plugs.AuthenticateApiKey) + + resources("/", AccountController, only: [:show], singleton: true) + + resources("/projects", ProjectController, only: [:index, :show]) do + resources("/latest-run", Project.LatestJobRunController, + only: [:show], + singleton: true + ) + end + + resources("/environments", EnvironmentController, only: [:index, :show]) + resources("/jobs", JobController, only: [:index, :show]) + + resources("/runs", JobRunController, only: [:index, :show]) do + resources("/artifacts/:artifact", ManifestController, only: [:show], singleton: true) + end + end + + scope "/datacoves", JadeWeb.API.Datacoves do + pipe_through(JadeWeb.Plugs.AuthenticateApiKey) + + resources("/manifests", ManifestController, only: [:create, :show], singleton: true) + + resources "/environments", EnvironmentController, param: :slug, only: [] do + resources("/files", FileController, + only: [:create, :show, :update, :delete], + param: :slug, + singleton: true + ) + end + + get("/projects/:project_slug/latest-manifest", 
ProjectManifestController, :show) + end + + get("/healthcheck", JadeWeb.API.Datacoves.HealthcheckController, :show, singleton: true) + end + + # TODO: Remove this scope once we migrated to /api/v2/datacoves + scope "/api/internal", JadeWeb.API.Datacoves do + pipe_through([:api]) + + get("/healthcheck", HealthcheckController, :show, singleton: true) + end + + scope "/api/internal", JadeWeb.API.Datacoves do + pipe_through([:api, JadeWeb.Plugs.AuthenticateApiKey]) + + resources("/manifests", ManifestController, only: [:create, :show], singleton: true) + + resources "/environments", EnvironmentController, param: :slug, only: [] do + resources("/files", FileController, + only: [:create, :show, :update, :delete], + param: :slug, + singleton: true + ) + end + + get("/projects/:project_slug/latest-manifest", ProjectManifestController, :show) + end + + scope "/api/v2" do + pipe_through(:api) + get("/openapi", OpenApiSpex.Plug.RenderSpec, []) + end + + scope "/api/v2" do + pipe_through :browser + get "/swaggerui", OpenApiSpex.Plug.SwaggerUI, path: "/api/v2/openapi" + end + + # Enable LiveDashboard in development + if Application.compile_env(:jade, :dev_routes) do + # If you want to use the LiveDashboard in production, you should put + # it behind authentication and allow only admins to access it. + # If your application does not have an admins-only section yet, + # you can use Plug.BasicAuth to set up some basic authentication + # as long as you are also using SSL (which you should anyway). + import Phoenix.LiveDashboard.Router + + scope "/dev" do + pipe_through(:browser) + + live_dashboard("/dashboard", metrics: JadeWeb.Telemetry) + end + end +end diff --git a/src/core/dbt-api/lib/jade_web/telemetry.ex b/src/core/dbt-api/lib/jade_web/telemetry.ex new file mode 100644 index 00000000..36f968e4 --- /dev/null +++ b/src/core/dbt-api/lib/jade_web/telemetry.ex @@ -0,0 +1,91 @@ +defmodule JadeWeb.Telemetry do + use Supervisor + import Telemetry.Metrics + + def start_link(arg) do + Supervisor.start_link(__MODULE__, arg, name: __MODULE__) + end + + @impl true + def init(_arg) do + children = [ + # Telemetry poller will execute the given period measurements + # every 10_000ms. Learn more here: https://hexdocs.pm/telemetry_metrics + {:telemetry_poller, measurements: periodic_measurements(), period: 10_000} + # Add reporters as children of your supervision tree. 
+ # {Telemetry.Metrics.ConsoleReporter, metrics: metrics()} + ] + + Supervisor.init(children, strategy: :one_for_one) + end + + def metrics do + [ + # Phoenix Metrics + summary("phoenix.endpoint.start.system_time", + unit: {:native, :millisecond} + ), + summary("phoenix.endpoint.stop.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.start.system_time", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.exception.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.router_dispatch.stop.duration", + tags: [:route], + unit: {:native, :millisecond} + ), + summary("phoenix.socket_connected.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.channel_joined.duration", + unit: {:native, :millisecond} + ), + summary("phoenix.channel_handled_in.duration", + tags: [:event], + unit: {:native, :millisecond} + ), + + # Database Metrics + summary("jade.repo.query.total_time", + unit: {:native, :millisecond}, + description: "The sum of the other measurements" + ), + summary("jade.repo.query.decode_time", + unit: {:native, :millisecond}, + description: "The time spent decoding the data received from the database" + ), + summary("jade.repo.query.query_time", + unit: {:native, :millisecond}, + description: "The time spent executing the query" + ), + summary("jade.repo.query.queue_time", + unit: {:native, :millisecond}, + description: "The time spent waiting for a database connection" + ), + summary("jade.repo.query.idle_time", + unit: {:native, :millisecond}, + description: "The time the connection spent waiting before being checked out for the query" + ), + + # VM Metrics + summary("vm.memory.total", unit: {:byte, :kilobyte}), + summary("vm.total_run_queue_lengths.total"), + summary("vm.total_run_queue_lengths.cpu"), + summary("vm.total_run_queue_lengths.io") + ] + end + + defp periodic_measurements do + [ + # A module, function and arguments to be invoked periodically. + # This function must call :telemetry.execute/3 and a metric must be added above. 
+ # {JadeWeb, :count_users, []} + ] + end +end diff --git a/src/core/dbt-api/lib/mix/airflow_migrate.ex b/src/core/dbt-api/lib/mix/airflow_migrate.ex new file mode 100644 index 00000000..07f8076c --- /dev/null +++ b/src/core/dbt-api/lib/mix/airflow_migrate.ex @@ -0,0 +1,62 @@ +defmodule Mix.Tasks.Airflow.Migrate do + use Mix.Task + + require Logger + + alias Airflow.Repo + + @preferred_cli_env :test + + @prefixes ["airflow1", "airflow2"] + + @shortdoc "Migrates all test airflow schemas in the database" + def run(args) do + {opts, _, _} = OptionParser.parse(args, strict: [quiet: :boolean]) + log_level = maybe_silence_logger(opts) + + {:ok, _deps} = Application.ensure_all_started(:jade) + {:ok, _pid} = Repo.start_link() + + priv = Repo.config() |> Keyword.get(:priv) |> Path.expand() + priv = priv <> "/migrations" + + :ok = maybe_create_db() + + Enum.each(@prefixes, fn prefix -> + create_prefix(prefix) + run_migrations(prefix, priv) + end) + + if log_level, do: Logger.configure(level: log_level) + end + + defp maybe_silence_logger(opts) do + if Keyword.get(opts, :quiet, false) do + log_level = Logger.level() + Logger.configure(level: :error) + log_level + end + end + + defp maybe_create_db() do + case Repo.__adapter__().storage_up(Repo.config()) do + :ok -> :ok + {:error, :already_up} -> :ok + {:error, term} -> {:error, term} + end + end + + defp create_prefix(prefix) do + query = """ + CREATE SCHEMA "#{prefix}" + """ + + Repo.query(query) + end + + defp run_migrations(prefix, priv) do + opts = [prefix: prefix, all: true] + + Ecto.Migrator.run(Repo, priv, :up, opts) + end +end diff --git a/src/core/dbt-api/lib/test_helper.exs b/src/core/dbt-api/lib/test_helper.exs new file mode 100644 index 00000000..296c9087 --- /dev/null +++ b/src/core/dbt-api/lib/test_helper.exs @@ -0,0 +1,3 @@ +ExUnit.start(exclude: [:skip]) +Ecto.Adapters.SQL.Sandbox.mode(Jade.Repo, :manual) +Ecto.Adapters.SQL.Sandbox.mode(Datacoves.Repo, :manual) diff --git a/src/core/dbt-api/mix.exs b/src/core/dbt-api/mix.exs new file mode 100644 index 00000000..022920d8 --- /dev/null +++ b/src/core/dbt-api/mix.exs @@ -0,0 +1,120 @@ +defmodule Jade.MixProject do + use Mix.Project + + @version "0.1.0" + + def version, do: @version + + def project do + [ + app: :jade, + version: @version, + elixir: "~> 1.18", + elixirc_paths: elixirc_paths(Mix.env()), + start_permanent: Mix.env() == :prod, + test_paths: ["test", "lib"], + aliases: aliases(), + deps: deps() + ] + end + + # Configuration for the OTP application. + # + # Type `mix help compile.app` for more information. + def application do + [ + mod: {Jade.Application, []}, + extra_applications: [:logger, :runtime_tools] + ] + end + + # Specifies which paths to compile per environment. + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] + + # Specifies your project dependencies. + # + # Type `mix help deps` for examples and options. 
+ defp deps do + [ + # Phoenix Dependencies + {:phoenix, "~> 1.7.9"}, + {:phoenix_ecto, "~> 4.4"}, + {:phoenix_html, "~> 4.2"}, + {:phoenix_live_reload, "~> 1.2", only: :dev}, + {:phoenix_live_view, "~> 1.0"}, + {:phoenix_live_dashboard, "~> 0.8.2"}, + {:gettext, "~> 0.20"}, + {:bandit, ">= 0.0.0"}, + {:jason, "~> 1.2"}, + + # Heroicons icons + {:heroicons, + github: "tailwindlabs/heroicons", tag: "v2.1.1", sparse: "optimized", app: false, compile: false, depth: 1}, + + # Build and Styling + {:esbuild, "~> 0.7", runtime: Mix.env() == :dev}, + {:tailwind, "~> 0.2.0", runtime: Mix.env() == :dev}, + + # OpenAPI Documentation + {:open_api_spex, "~> 3.18"}, + + # Database + {:ecto_sql, "~> 3.10"}, + {:postgrex, ">= 0.0.0"}, + + # File Storage + {:minio, github: "PJUllrich/minio_ex"}, + + # AWS S3 Dependencies + {:ex_aws, "~> 2.5.0"}, + {:ex_aws_s3, "~> 2.5.2"}, + {:poison, "~> 6.0"}, + {:hackney, "~> 1.20.1"}, + {:sweet_xml, "~> 0.7.4"}, + + # Azure Blob Dependencies + {:azurex, "~> 1.1.0"}, + + # HTTP Requests + {:httpoison, "~> 2.2", override: true}, + + # Decription of Datacove Configs + {:fernetex, "~> 0.5.0"}, + + # Test Dependencies + {:floki, ">= 0.30.0", only: :test}, + {:ex_machina, "~> 2.8", only: :test}, + {:assertions, "~> 0.10", only: :test}, + + # Telemetry + {:telemetry_metrics, "~> 1.0"}, + {:telemetry_poller, "~> 1.0"} + ] + end + + # Aliases are shortcuts or tasks specific to the current project. + # For example, to install project dependencies and perform other setup tasks, run: + # + # $ mix setup + # + # See the documentation for `Mix` for more info on aliases. + defp aliases do + [ + setup: ["deps.get", "ecto.setup", "assets.setup", "assets.build"], + "ecto.setup": ["ecto.create", "ecto.migrate", "run priv/repo/seeds.exs"], + "ecto.reset": ["ecto.drop", "ecto.setup"], + test: ["ecto.create --quiet", "ecto.migrate --quiet", "airflow.migrate --quiet", "test"], + "test.reset": [ + "ecto.drop", + "ecto.create --quiet", + "ecto.migrate --quiet", + "airflow.migrate --quiet", + "test" + ], + "assets.setup": ["tailwind.install --if-missing", "esbuild.install --if-missing"], + "assets.build": ["tailwind default", "esbuild default"], + "assets.deploy": ["tailwind default --minify", "esbuild default --minify", "phx.digest"] + ] + end +end diff --git a/src/core/dbt-api/mix.lock b/src/core/dbt-api/mix.lock new file mode 100644 index 00000000..98e483ef --- /dev/null +++ b/src/core/dbt-api/mix.lock @@ -0,0 +1,62 @@ +%{ + "assertions": {:hex, :assertions, "0.20.1", "e6bfcefbf199bc760d273d5a204ad9ef8a4f6c2b4725fc51d10610d73062e57b", [:mix], [], "hexpm", "848284fbde52f752232d73b8f77060ad191e1a98c177873c4b8dc56c4958defd"}, + "azurex": {:hex, :azurex, "1.1.0", "32c18dc3817338bc7ff794a93a0448696cfc407585b634af7a2f4caada0c0e62", [:mix], [{:httpoison, "~> 1.8", [hex: :httpoison, repo: "hexpm", optional: false]}], "hexpm", "7abe82d2b9de836428eb608db6afc383dad200294fb9b9d9adc003681d72bf50"}, + "bandit": {:hex, :bandit, "1.6.7", "42f30e37a1c89a2a12943c5dca76f731a2313e8a2e21c1a95dc8241893e922d1", [:mix], [{:hpax, "~> 1.0", [hex: :hpax, repo: "hexpm", optional: false]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, {:thousand_island, "~> 1.0", [hex: :thousand_island, repo: "hexpm", optional: false]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "551ba8ff5e4fc908cbeb8c9f0697775fb6813a96d9de5f7fe02e34e76fd7d184"}, + "castore": {:hex, :castore, "1.0.11", 
"4bbd584741601eb658007339ea730b082cc61f3554cf2e8f39bf693a11b49073", [:mix], [], "hexpm", "e03990b4db988df56262852f20de0f659871c35154691427a5047f4967a16a62"}, + "certifi": {:hex, :certifi, "2.12.0", "2d1cca2ec95f59643862af91f001478c9863c2ac9cb6e2f89780bfd8de987329", [:rebar3], [], "hexpm", "ee68d85df22e554040cdb4be100f33873ac6051387baf6a8f6ce82272340ff1c"}, + "combine": {:hex, :combine, "0.10.0", "eff8224eeb56498a2af13011d142c5e7997a80c8f5b97c499f84c841032e429f", [:mix], [], "hexpm", "1b1dbc1790073076580d0d1d64e42eae2366583e7aecd455d1215b0d16f2451b"}, + "db_connection": {:hex, :db_connection, "2.7.0", "b99faa9291bb09892c7da373bb82cba59aefa9b36300f6145c5f201c7adf48ec", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "dcf08f31b2701f857dfc787fbad78223d61a32204f217f15e881dd93e4bdd3ff"}, + "decimal": {:hex, :decimal, "2.3.0", "3ad6255aa77b4a3c4f818171b12d237500e63525c2fd056699967a3e7ea20f62", [:mix], [], "hexpm", "a4d66355cb29cb47c3cf30e71329e58361cfcb37c34235ef3bf1d7bf3773aeac"}, + "ecto": {:hex, :ecto, "3.12.5", "4a312960ce612e17337e7cefcf9be45b95a3be6b36b6f94dfb3d8c361d631866", [:mix], [{:decimal, "~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "6eb18e80bef8bb57e17f5a7f068a1719fbda384d40fc37acb8eb8aeca493b6ea"}, + "ecto_sql": {:hex, :ecto_sql, "3.12.1", "c0d0d60e85d9ff4631f12bafa454bc392ce8b9ec83531a412c12a0d415a3a4d0", [:mix], [{:db_connection, "~> 2.4.1 or ~> 2.5", [hex: :db_connection, repo: "hexpm", optional: false]}, {:ecto, "~> 3.12", [hex: :ecto, repo: "hexpm", optional: false]}, {:myxql, "~> 0.7", [hex: :myxql, repo: "hexpm", optional: true]}, {:postgrex, "~> 0.19 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}, {:tds, "~> 2.1.1 or ~> 2.2", [hex: :tds, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.0 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "aff5b958a899762c5f09028c847569f7dfb9cc9d63bdb8133bff8a5546de6bf5"}, + "esbuild": {:hex, :esbuild, "0.9.0", "f043eeaca4932ca8e16e5429aebd90f7766f31ac160a25cbd9befe84f2bc068f", [:mix], [{:jason, "~> 1.4", [hex: :jason, repo: "hexpm", optional: false]}], "hexpm", "b415027f71d5ab57ef2be844b2a10d0c1b5a492d431727f43937adce22ba45ae"}, + "ex_aws": {:hex, :ex_aws, "2.5.8", "0393cfbc5e4a9e7017845451a015d836a670397100aa4c86901980e2a2c5f7d4", [:mix], [{:configparser_ex, "~> 4.0", [hex: :configparser_ex, repo: "hexpm", optional: true]}, {:hackney, "~> 1.16", [hex: :hackney, repo: "hexpm", optional: true]}, {:jason, "~> 1.1", [hex: :jason, repo: "hexpm", optional: true]}, {:jsx, "~> 2.8 or ~> 3.0", [hex: :jsx, repo: "hexpm", optional: true]}, {:mime, "~> 1.2 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:req, "~> 0.3", [hex: :req, repo: "hexpm", optional: true]}, {:sweet_xml, "~> 0.7", [hex: :sweet_xml, repo: "hexpm", optional: true]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "8f79777b7932168956c8cc3a6db41f5783aa816eb50de356aed3165a71e5f8c3"}, + "ex_aws_s3": {:hex, :ex_aws_s3, "2.5.6", "d135983bbd8b6df6350dfd83999437725527c1bea151e5055760bfc9b2d17c20", [:mix], [{:ex_aws, "~> 2.0", [hex: :ex_aws, repo: "hexpm", optional: false]}, {:sweet_xml, ">= 0.0.0", [hex: :sweet_xml, repo: "hexpm", optional: true]}], "hexpm", "9874e12847e469ca2f13a5689be04e546c16f63caf6380870b7f25bf7cb98875"}, + "ex_machina": {:hex, :ex_machina, 
"2.8.0", "a0e847b5712065055ec3255840e2c78ef9366634d62390839d4880483be38abe", [:mix], [{:ecto, "~> 2.2 or ~> 3.0", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_sql, "~> 3.0", [hex: :ecto_sql, repo: "hexpm", optional: true]}], "hexpm", "79fe1a9c64c0c1c1fab6c4fa5d871682cb90de5885320c187d117004627a7729"}, + "expo": {:hex, :expo, "1.1.0", "f7b9ed7fb5745ebe1eeedf3d6f29226c5dd52897ac67c0f8af62a07e661e5c75", [:mix], [], "hexpm", "fbadf93f4700fb44c331362177bdca9eeb8097e8b0ef525c9cc501cb9917c960"}, + "fernetex": {:hex, :fernetex, "0.5.0", "4bb470e9d914e0d4eddbfd851f50f48929c33c78d55075ce1e0a526fa6434653", [:mix], [], "hexpm", "b91e29d6087afdc52e67ec5a0f6df63b887b28a3796803f41c1db881fc861cc4"}, + "file_system": {:hex, :file_system, "1.1.0", "08d232062284546c6c34426997dd7ef6ec9f8bbd090eb91780283c9016840e8f", [:mix], [], "hexpm", "bfcf81244f416871f2a2e15c1b515287faa5db9c6bcf290222206d120b3d43f6"}, + "finch": {:hex, :finch, "0.16.0", "40733f02c89f94a112518071c0a91fe86069560f5dbdb39f9150042f44dcfb1a", [:mix], [{:castore, "~> 0.1 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: false]}, {:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:mint, "~> 1.3", [hex: :mint, repo: "hexpm", optional: false]}, {:nimble_options, "~> 0.4 or ~> 1.0", [hex: :nimble_options, repo: "hexpm", optional: false]}, {:nimble_pool, "~> 0.2.6 or ~> 1.0", [hex: :nimble_pool, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "f660174c4d519e5fec629016054d60edd822cdfe2b7270836739ac2f97735ec5"}, + "floki": {:hex, :floki, "0.37.0", "b83e0280bbc6372f2a403b2848013650b16640cd2470aea6701f0632223d719e", [:mix], [], "hexpm", "516a0c15a69f78c47dc8e0b9b3724b29608aa6619379f91b1ffa47109b5d0dd3"}, + "gettext": {:hex, :gettext, "0.26.2", "5978aa7b21fada6deabf1f6341ddba50bc69c999e812211903b169799208f2a8", [:mix], [{:expo, "~> 0.5.1 or ~> 1.0", [hex: :expo, repo: "hexpm", optional: false]}], "hexpm", "aa978504bcf76511efdc22d580ba08e2279caab1066b76bb9aa81c4a1e0a32a5"}, + "hackney": {:hex, :hackney, "1.20.1", "8d97aec62ddddd757d128bfd1df6c5861093419f8f7a4223823537bad5d064e2", [:rebar3], [{:certifi, "~> 2.12.0", [hex: :certifi, repo: "hexpm", optional: false]}, {:idna, "~> 6.1.0", [hex: :idna, repo: "hexpm", optional: false]}, {:metrics, "~> 1.0.0", [hex: :metrics, repo: "hexpm", optional: false]}, {:mimerl, "~> 1.1", [hex: :mimerl, repo: "hexpm", optional: false]}, {:parse_trans, "3.4.1", [hex: :parse_trans, repo: "hexpm", optional: false]}, {:ssl_verify_fun, "~> 1.1.0", [hex: :ssl_verify_fun, repo: "hexpm", optional: false]}, {:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "fe9094e5f1a2a2c0a7d10918fee36bfec0ec2a979994cff8cfe8058cd9af38e3"}, + "heroicons": {:git, "https://github.com/tailwindlabs/heroicons.git", "88ab3a0d790e6a47404cba02800a6b25d2afae50", [tag: "v2.1.1", sparse: "optimized", depth: 1]}, + "hpax": {:hex, :hpax, "1.0.2", "762df951b0c399ff67cc57c3995ec3cf46d696e41f0bba17da0518d94acd4aac", [:mix], [], "hexpm", "2f09b4c1074e0abd846747329eaa26d535be0eb3d189fa69d812bfb8bfefd32f"}, + "httpoison": {:hex, :httpoison, "2.2.1", "87b7ed6d95db0389f7df02779644171d7319d319178f6680438167d7b69b1f3d", [:mix], [{:hackney, "~> 1.17", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm", "51364e6d2f429d80e14fe4b5f8e39719cacd03eb3f9a9286e61e216feac2d2df"}, + "idna": {:hex, :idna, "6.1.1", "8a63070e9f7d0c62eb9d9fcb360a7de382448200fbbd1b106cc96d3d8099df8d", [:rebar3], 
[{:unicode_util_compat, "~> 0.7.0", [hex: :unicode_util_compat, repo: "hexpm", optional: false]}], "hexpm", "92376eb7894412ed19ac475e4a86f7b413c1b9fbb5bd16dccd57934157944cea"}, + "jason": {:hex, :jason, "1.4.4", "b9226785a9aa77b6857ca22832cffa5d5011a667207eb2a0ad56adb5db443b8a", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "c5eb0cab91f094599f94d55bc63409236a8ec69a21a67814529e8d5f6cc90b3b"}, + "metrics": {:hex, :metrics, "1.0.1", "25f094dea2cda98213cecc3aeff09e940299d950904393b2a29d191c346a8486", [:rebar3], [], "hexpm", "69b09adddc4f74a40716ae54d140f93beb0fb8978d8636eaded0c31b6f099f16"}, + "mime": {:hex, :mime, "2.0.6", "8f18486773d9b15f95f4f4f1e39b710045fa1de891fada4516559967276e4dc2", [:mix], [], "hexpm", "c9945363a6b26d747389aac3643f8e0e09d30499a138ad64fe8fd1d13d9b153e"}, + "mimerl": {:hex, :mimerl, "1.3.0", "d0cd9fc04b9061f82490f6581e0128379830e78535e017f7780f37fea7545726", [:rebar3], [], "hexpm", "a1e15a50d1887217de95f0b9b0793e32853f7c258a5cd227650889b38839fe9d"}, + "minio": {:git, "https://github.com/PJUllrich/minio_ex.git", "61f5b73a65f70126603ddcefc5d516daf39d3afd", []}, + "mint": {:hex, :mint, "1.5.1", "8db5239e56738552d85af398798c80648db0e90f343c8469f6c6d8898944fb6f", [:mix], [{:castore, "~> 0.1.0 or ~> 1.0", [hex: :castore, repo: "hexpm", optional: true]}, {:hpax, "~> 0.1.1", [hex: :hpax, repo: "hexpm", optional: false]}], "hexpm", "4a63e1e76a7c3956abd2c72f370a0d0aecddc3976dea5c27eccbecfa5e7d5b1e"}, + "nimble_options": {:hex, :nimble_options, "1.0.2", "92098a74df0072ff37d0c12ace58574d26880e522c22801437151a159392270e", [:mix], [], "hexpm", "fd12a8db2021036ce12a309f26f564ec367373265b53e25403f0ee697380f1b8"}, + "nimble_pool": {:hex, :nimble_pool, "1.0.0", "5eb82705d138f4dd4423f69ceb19ac667b3b492ae570c9f5c900bb3d2f50a847", [:mix], [], "hexpm", "80be3b882d2d351882256087078e1b1952a28bf98d0a287be87e4a24a710b67a"}, + "open_api_spex": {:hex, :open_api_spex, "3.21.2", "6a704f3777761feeb5657340250d6d7332c545755116ca98f33d4b875777e1e5", [:mix], [{:decimal, "~> 1.0 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:plug, "~> 1.7", [hex: :plug, repo: "hexpm", optional: false]}, {:poison, "~> 3.0 or ~> 4.0 or ~> 5.0 or ~> 6.0", [hex: :poison, repo: "hexpm", optional: true]}, {:ymlr, "~> 2.0 or ~> 3.0 or ~> 4.0 or ~> 5.0", [hex: :ymlr, repo: "hexpm", optional: true]}], "hexpm", "f42ae6ed668b895ebba3e02773cfb4b41050df26f803f2ef634c72a7687dc387"}, + "parse_trans": {:hex, :parse_trans, "3.4.1", "6e6aa8167cb44cc8f39441d05193be6e6f4e7c2946cb2759f015f8c56b76e5ff", [:rebar3], [], "hexpm", "620a406ce75dada827b82e453c19cf06776be266f5a67cff34e1ef2cbb60e49a"}, + "phoenix": {:hex, :phoenix, "1.7.19", "36617efe5afbd821099a8b994ff4618a340a5bfb25531a1802c4d4c634017a57", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix_pubsub, "~> 2.1", [hex: :phoenix_pubsub, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.7", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:plug_crypto, "~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}, 
{:websock_adapter, "~> 0.5.3", [hex: :websock_adapter, repo: "hexpm", optional: false]}], "hexpm", "ba4dc14458278773f905f8ae6c2ec743d52c3a35b6b353733f64f02dfe096cd6"}, + "phoenix_ecto": {:hex, :phoenix_ecto, "4.6.3", "f686701b0499a07f2e3b122d84d52ff8a31f5def386e03706c916f6feddf69ef", [:mix], [{:ecto, "~> 3.5", [hex: :ecto, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.1", [hex: :phoenix_html, repo: "hexpm", optional: true]}, {:plug, "~> 1.9", [hex: :plug, repo: "hexpm", optional: false]}, {:postgrex, "~> 0.16 or ~> 1.0", [hex: :postgrex, repo: "hexpm", optional: true]}], "hexpm", "909502956916a657a197f94cc1206d9a65247538de8a5e186f7537c895d95764"}, + "phoenix_html": {:hex, :phoenix_html, "4.2.0", "83a4d351b66f472ebcce242e4ae48af1b781866f00ef0eb34c15030d4e2069ac", [:mix], [], "hexpm", "9713b3f238d07043583a94296cc4bbdceacd3b3a6c74667f4df13971e7866ec8"}, + "phoenix_live_dashboard": {:hex, :phoenix_live_dashboard, "0.8.6", "7b1f0327f54c9eb69845fd09a77accf922f488c549a7e7b8618775eb603a62c7", [:mix], [{:ecto, "~> 3.6.2 or ~> 3.7", [hex: :ecto, repo: "hexpm", optional: true]}, {:ecto_mysql_extras, "~> 0.5", [hex: :ecto_mysql_extras, repo: "hexpm", optional: true]}, {:ecto_psql_extras, "~> 0.7", [hex: :ecto_psql_extras, repo: "hexpm", optional: true]}, {:ecto_sqlite3_extras, "~> 1.1.7 or ~> 1.2.0", [hex: :ecto_sqlite3_extras, repo: "hexpm", optional: true]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:phoenix_live_view, "~> 0.19 or ~> 1.0", [hex: :phoenix_live_view, repo: "hexpm", optional: false]}, {:telemetry_metrics, "~> 0.6 or ~> 1.0", [hex: :telemetry_metrics, repo: "hexpm", optional: false]}], "hexpm", "1681ab813ec26ca6915beb3414aa138f298e17721dc6a2bde9e6eb8a62360ff6"}, + "phoenix_live_reload": {:hex, :phoenix_live_reload, "1.5.3", "f2161c207fda0e4fb55165f650f7f8db23f02b29e3bff00ff7ef161d6ac1f09d", [:mix], [{:file_system, "~> 0.3 or ~> 1.0", [hex: :file_system, repo: "hexpm", optional: false]}, {:phoenix, "~> 1.4", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "b4ec9cd73cb01ff1bd1cac92e045d13e7030330b74164297d1aee3907b54803c"}, + "phoenix_live_view": {:hex, :phoenix_live_view, "1.0.4", "327491b033e79db2f887b065c5a2993228449091883d74cfa1baa12f8c98d5eb", [:mix], [{:floki, "~> 0.36", [hex: :floki, repo: "hexpm", optional: true]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:phoenix, "~> 1.6.15 or ~> 1.7.0", [hex: :phoenix, repo: "hexpm", optional: false]}, {:phoenix_html, "~> 3.3 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: false]}, {:phoenix_template, "~> 1.0", [hex: :phoenix_template, repo: "hexpm", optional: false]}, {:phoenix_view, "~> 2.0", [hex: :phoenix_view, repo: "hexpm", optional: true]}, {:plug, "~> 1.15", [hex: :plug, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.2 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a9865316ddf8d78f382d63af278d20436b52d262b60239956817a61279514366"}, + "phoenix_pubsub": {:hex, :phoenix_pubsub, "2.1.3", "3168d78ba41835aecad272d5e8cd51aa87a7ac9eb836eabc42f6e57538e3731d", [:mix], [], "hexpm", "bba06bc1dcfd8cb086759f0edc94a8ba2bc8896d5331a1e2c2902bf8e36ee502"}, + "phoenix_template": {:hex, :phoenix_template, "1.0.4", "e2092c132f3b5e5b2d49c96695342eb36d0ed514c5b252a77048d5969330d639", [:mix], [{:phoenix_html, "~> 2.14.2 or ~> 3.0 or ~> 4.0", [hex: :phoenix_html, repo: "hexpm", optional: true]}], "hexpm", "2c0c81f0e5c6753faf5cca2f229c9709919aba34fab866d3bc05060c9c444206"}, + "plug": {:hex, :plug, "1.16.1", 
"40c74619c12f82736d2214557dedec2e9762029b2438d6d175c5074c933edc9d", [:mix], [{:mime, "~> 1.0 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug_crypto, "~> 1.1.1 or ~> 1.2 or ~> 2.0", [hex: :plug_crypto, repo: "hexpm", optional: false]}, {:telemetry, "~> 0.4.3 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "a13ff6b9006b03d7e33874945b2755253841b238c34071ed85b0e86057f8cddc"}, + "plug_crypto": {:hex, :plug_crypto, "2.1.0", "f44309c2b06d249c27c8d3f65cfe08158ade08418cf540fd4f72d4d6863abb7b", [:mix], [], "hexpm", "131216a4b030b8f8ce0f26038bc4421ae60e4bb95c5cf5395e1421437824c4fa"}, + "poison": {:hex, :poison, "6.0.0", "9bbe86722355e36ffb62c51a552719534257ba53f3271dacd20fbbd6621a583a", [:mix], [{:decimal, "~> 2.1", [hex: :decimal, repo: "hexpm", optional: true]}], "hexpm", "bb9064632b94775a3964642d6a78281c07b7be1319e0016e1643790704e739a2"}, + "postgrex": {:hex, :postgrex, "0.20.0", "363ed03ab4757f6bc47942eff7720640795eb557e1935951c1626f0d303a3aed", [:mix], [{:db_connection, "~> 2.1", [hex: :db_connection, repo: "hexpm", optional: false]}, {:decimal, "~> 1.5 or ~> 2.0", [hex: :decimal, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:table, "~> 0.1.0", [hex: :table, repo: "hexpm", optional: true]}], "hexpm", "d36ef8b36f323d29505314f704e21a1a038e2dc387c6409ee0cd24144e187c0f"}, + "req": {:hex, :req, "0.4.4", "a17b6bec956c9af4f08b5d8e8a6fc6e4edf24ccc0ac7bf363a90bba7a0f0138c", [:mix], [{:brotli, "~> 0.3.1", [hex: :brotli, repo: "hexpm", optional: true]}, {:ezstd, "~> 1.0", [hex: :ezstd, repo: "hexpm", optional: true]}, {:finch, "~> 0.9", [hex: :finch, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: false]}, {:mime, "~> 1.6 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:nimble_csv, "~> 1.0", [hex: :nimble_csv, repo: "hexpm", optional: true]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: true]}], "hexpm", "2618c0493444fee927d12073afb42e9154e766b3f4448e1011f0d3d551d1a011"}, + "ssl_verify_fun": {:hex, :ssl_verify_fun, "1.1.7", "354c321cf377240c7b8716899e182ce4890c5938111a1296add3ec74cf1715df", [:make, :mix, :rebar3], [], "hexpm", "fe4c190e8f37401d30167c8c405eda19469f34577987c76dde613e838bbc67f8"}, + "sweet_xml": {:hex, :sweet_xml, "0.7.5", "803a563113981aaac202a1dbd39771562d0ad31004ddbfc9b5090bdcd5605277", [:mix], [], "hexpm", "193b28a9b12891cae351d81a0cead165ffe67df1b73fe5866d10629f4faefb12"}, + "tailwind": {:hex, :tailwind, "0.2.4", "5706ec47182d4e7045901302bf3a333e80f3d1af65c442ba9a9eed152fb26c2e", [:mix], [{:castore, ">= 0.0.0", [hex: :castore, repo: "hexpm", optional: false]}], "hexpm", "c6e4a82b8727bab593700c998a4d98cf3d8025678bfde059aed71d0000c3e463"}, + "telemetry": {:hex, :telemetry, "1.3.0", "fedebbae410d715cf8e7062c96a1ef32ec22e764197f70cda73d82778d61e7a2", [:rebar3], [], "hexpm", "7015fc8919dbe63764f4b4b87a95b7c0996bd539e0d499be6ec9d7f3875b79e6"}, + "telemetry_metrics": {:hex, :telemetry_metrics, "1.1.0", "5bd5f3b5637e0abea0426b947e3ce5dd304f8b3bc6617039e2b5a008adc02f8f", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "e7b79e8ddfde70adb6db8a6623d1778ec66401f366e9a8f5dd0955c56bc8ce67"}, + "telemetry_poller": {:hex, :telemetry_poller, "1.1.0", "58fa7c216257291caaf8d05678c8d01bd45f4bdbc1286838a28c4bb62ef32999", [:rebar3], [{:telemetry, "~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "9eb9d9cbfd81cbd7cdd24682f8711b6e2b691289a0de6826e58452f28c103c8f"}, + 
"thousand_island": {:hex, :thousand_island, "1.3.9", "095db3e2650819443e33237891271943fad3b7f9ba341073947581362582ab5a", [:mix], [{:telemetry, "~> 0.4 or ~> 1.0", [hex: :telemetry, repo: "hexpm", optional: false]}], "hexpm", "25ab4c07badadf7f87adb4ab414e0ed374e5f19e72503aa85132caa25776e54f"}, + "timex": {:hex, :timex, "3.7.11", "bb95cb4eb1d06e27346325de506bcc6c30f9c6dea40d1ebe390b262fad1862d1", [:mix], [{:combine, "~> 0.10", [hex: :combine, repo: "hexpm", optional: false]}, {:gettext, "~> 0.20", [hex: :gettext, repo: "hexpm", optional: false]}, {:tzdata, "~> 1.1", [hex: :tzdata, repo: "hexpm", optional: false]}], "hexpm", "8b9024f7efbabaf9bd7aa04f65cf8dcd7c9818ca5737677c7b76acbc6a94d1aa"}, + "tzdata": {:hex, :tzdata, "1.1.2", "45e5f1fcf8729525ec27c65e163be5b3d247ab1702581a94674e008413eef50b", [:mix], [{:hackney, "~> 1.17", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm", "cec7b286e608371602318c414f344941d5eb0375e14cfdab605cca2fe66cba8b"}, + "unicode_util_compat": {:hex, :unicode_util_compat, "0.7.0", "bc84380c9ab48177092f43ac89e4dfa2c6d62b40b8bd132b1059ecc7232f9a78", [:rebar3], [], "hexpm", "25eee6d67df61960cf6a794239566599b09e17e668d3700247bc498638152521"}, + "websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"}, + "websock_adapter": {:hex, :websock_adapter, "0.5.8", "3b97dc94e407e2d1fc666b2fb9acf6be81a1798a2602294aac000260a7c4a47d", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "315b9a1865552212b5f35140ad194e67ce31af45bcee443d4ecb96b5fd3f3782"}, +} diff --git a/src/core/dbt-api/priv/gettext/en/LC_MESSAGES/errors.po b/src/core/dbt-api/priv/gettext/en/LC_MESSAGES/errors.po new file mode 100644 index 00000000..844c4f5c --- /dev/null +++ b/src/core/dbt-api/priv/gettext/en/LC_MESSAGES/errors.po @@ -0,0 +1,112 @@ +## `msgid`s in this file come from POT (.pot) files. +## +## Do not add, change, or remove `msgid`s manually here as +## they're tied to the ones in the corresponding POT file +## (with the same domain). +## +## Use `mix gettext.extract --merge` or `mix gettext.merge` +## to merge POT files into PO files. 
+msgid "" +msgstr "" +"Language: en\n" + +## From Ecto.Changeset.cast/4 +msgid "can't be blank" +msgstr "" + +## From Ecto.Changeset.unique_constraint/3 +msgid "has already been taken" +msgstr "" + +## From Ecto.Changeset.put_change/3 +msgid "is invalid" +msgstr "" + +## From Ecto.Changeset.validate_acceptance/3 +msgid "must be accepted" +msgstr "" + +## From Ecto.Changeset.validate_format/3 +msgid "has invalid format" +msgstr "" + +## From Ecto.Changeset.validate_subset/3 +msgid "has an invalid entry" +msgstr "" + +## From Ecto.Changeset.validate_exclusion/3 +msgid "is reserved" +msgstr "" + +## From Ecto.Changeset.validate_confirmation/3 +msgid "does not match confirmation" +msgstr "" + +## From Ecto.Changeset.no_assoc_constraint/3 +msgid "is still associated with this entry" +msgstr "" + +msgid "are still associated with this entry" +msgstr "" + +## From Ecto.Changeset.validate_length/3 +msgid "should have %{count} item(s)" +msgid_plural "should have %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} character(s)" +msgid_plural "should be %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} byte(s)" +msgid_plural "should be %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at least %{count} item(s)" +msgid_plural "should have at least %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} character(s)" +msgid_plural "should be at least %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} byte(s)" +msgid_plural "should be at least %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at most %{count} item(s)" +msgid_plural "should have at most %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} character(s)" +msgid_plural "should be at most %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} byte(s)" +msgid_plural "should be at most %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +## From Ecto.Changeset.validate_number/3 +msgid "must be less than %{number}" +msgstr "" + +msgid "must be greater than %{number}" +msgstr "" + +msgid "must be less than or equal to %{number}" +msgstr "" + +msgid "must be greater than or equal to %{number}" +msgstr "" + +msgid "must be equal to %{number}" +msgstr "" diff --git a/src/core/dbt-api/priv/gettext/errors.pot b/src/core/dbt-api/priv/gettext/errors.pot new file mode 100644 index 00000000..ccf5c687 --- /dev/null +++ b/src/core/dbt-api/priv/gettext/errors.pot @@ -0,0 +1,110 @@ +## This is a PO Template file. +## +## `msgid`s here are often extracted from source code. +## Add new translations manually only if they're dynamic +## translations that can't be statically extracted. +## +## Run `mix gettext.extract` to bring this file up to +## date. Leave `msgstr`s empty as changing them here has no +## effect: edit them in PO (`.po`) files instead. 
+ +## From Ecto.Changeset.cast/4 +msgid "can't be blank" +msgstr "" + +## From Ecto.Changeset.unique_constraint/3 +msgid "has already been taken" +msgstr "" + +## From Ecto.Changeset.put_change/3 +msgid "is invalid" +msgstr "" + +## From Ecto.Changeset.validate_acceptance/3 +msgid "must be accepted" +msgstr "" + +## From Ecto.Changeset.validate_format/3 +msgid "has invalid format" +msgstr "" + +## From Ecto.Changeset.validate_subset/3 +msgid "has an invalid entry" +msgstr "" + +## From Ecto.Changeset.validate_exclusion/3 +msgid "is reserved" +msgstr "" + +## From Ecto.Changeset.validate_confirmation/3 +msgid "does not match confirmation" +msgstr "" + +## From Ecto.Changeset.no_assoc_constraint/3 +msgid "is still associated with this entry" +msgstr "" + +msgid "are still associated with this entry" +msgstr "" + +## From Ecto.Changeset.validate_length/3 +msgid "should have %{count} item(s)" +msgid_plural "should have %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} character(s)" +msgid_plural "should be %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be %{count} byte(s)" +msgid_plural "should be %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at least %{count} item(s)" +msgid_plural "should have at least %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} character(s)" +msgid_plural "should be at least %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at least %{count} byte(s)" +msgid_plural "should be at least %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should have at most %{count} item(s)" +msgid_plural "should have at most %{count} item(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} character(s)" +msgid_plural "should be at most %{count} character(s)" +msgstr[0] "" +msgstr[1] "" + +msgid "should be at most %{count} byte(s)" +msgid_plural "should be at most %{count} byte(s)" +msgstr[0] "" +msgstr[1] "" + +## From Ecto.Changeset.validate_number/3 +msgid "must be less than %{number}" +msgstr "" + +msgid "must be greater than %{number}" +msgstr "" + +msgid "must be less than or equal to %{number}" +msgstr "" + +msgid "must be greater than or equal to %{number}" +msgstr "" + +msgid "must be equal to %{number}" +msgstr "" diff --git a/src/core/dbt-api/priv/repo/migrations/.formatter.exs b/src/core/dbt-api/priv/repo/migrations/.formatter.exs new file mode 100644 index 00000000..49f9151e --- /dev/null +++ b/src/core/dbt-api/priv/repo/migrations/.formatter.exs @@ -0,0 +1,4 @@ +[ + import_deps: [:ecto_sql], + inputs: ["*.exs"] +] diff --git a/src/core/dbt-api/priv/repo/migrations/20231021122344_create_tokens.exs b/src/core/dbt-api/priv/repo/migrations/20231021122344_create_tokens.exs new file mode 100644 index 00000000..bd614533 --- /dev/null +++ b/src/core/dbt-api/priv/repo/migrations/20231021122344_create_tokens.exs @@ -0,0 +1,15 @@ +defmodule Jade.Repo.Migrations.CreateTokens do + use Ecto.Migration + + def change do + create table(:tokens) do + add(:account_id, :integer) + add(:key_hash, :string) + + timestamps(type: :utc_datetime) + end + + create(unique_index(:tokens, [:key_hash])) + create(unique_index(:tokens, [:account_id])) + end +end diff --git a/src/core/dbt-api/priv/repo/migrations/20231102163358_create_dag_ids.exs b/src/core/dbt-api/priv/repo/migrations/20231102163358_create_dag_ids.exs new file mode 100644 index 00000000..7371d28c --- /dev/null +++ b/src/core/dbt-api/priv/repo/migrations/20231102163358_create_dag_ids.exs @@ -0,0 
+1,12 @@ +defmodule Jade.Repo.Migrations.CreateJobIds do + use Ecto.Migration + + def change do + create table(:job_ids) do + add(:environment_id, :integer) + add(:dag_id, :string) + end + + create(unique_index(:job_ids, [:environment_id, :dag_id])) + end +end diff --git a/src/core/dbt-api/priv/repo/migrations/20231114190630_create_dag_run_ids.exs b/src/core/dbt-api/priv/repo/migrations/20231114190630_create_dag_run_ids.exs new file mode 100644 index 00000000..eba8840d --- /dev/null +++ b/src/core/dbt-api/priv/repo/migrations/20231114190630_create_dag_run_ids.exs @@ -0,0 +1,12 @@ +defmodule Jade.Repo.Migrations.CreateJobRunIds do + use Ecto.Migration + + def change do + create table(:job_run_ids) do + add(:environment_id, :integer) + add(:dag_run_id, :integer) + end + + create(unique_index(:job_run_ids, [:environment_id, :dag_run_id])) + end +end diff --git a/src/core/dbt-api/priv/repo/migrations/20231121164453_create_manifests.exs b/src/core/dbt-api/priv/repo/migrations/20231121164453_create_manifests.exs new file mode 100644 index 00000000..794af6ca --- /dev/null +++ b/src/core/dbt-api/priv/repo/migrations/20231121164453_create_manifests.exs @@ -0,0 +1,20 @@ +defmodule Jade.Repo.Migrations.CreateManifests do + use Ecto.Migration + + def change do + create table(:manifests) do + add :slug, :binary_id, default: fragment("gen_random_uuid()"), null: false + add :account_id, :integer + add :environment_slug, :string + add :dag_id, :string + add :dag_run_id, :integer + add :dag_run_run_id, :string + add :job_run_id, references(:job_run_ids, on_delete: :nothing) + add :content, :jsonb + + timestamps(type: :utc_datetime) + end + + create index(:manifests, [:account_id, :job_run_id]) + end +end diff --git a/src/core/dbt-api/priv/repo/migrations/20240308120130_add_project_id_to_manifests.exs b/src/core/dbt-api/priv/repo/migrations/20240308120130_add_project_id_to_manifests.exs new file mode 100644 index 00000000..fff5e9ef --- /dev/null +++ b/src/core/dbt-api/priv/repo/migrations/20240308120130_add_project_id_to_manifests.exs @@ -0,0 +1,11 @@ +defmodule Jade.Repo.Migrations.AddProjectIdToManifests do + use Ecto.Migration + + def change do + alter table(:manifests) do + add :project_id, :integer + end + + create index(:manifests, [:project_id]) + end +end diff --git a/src/core/dbt-api/priv/repo/migrations/20250110112506_create_files.exs b/src/core/dbt-api/priv/repo/migrations/20250110112506_create_files.exs new file mode 100644 index 00000000..f7ef5880 --- /dev/null +++ b/src/core/dbt-api/priv/repo/migrations/20250110112506_create_files.exs @@ -0,0 +1,18 @@ +defmodule Jade.Repo.Migrations.CreateFiles do + use Ecto.Migration + + def change do + create table(:files) do + add :slug, :binary_id, default: fragment("gen_random_uuid()"), null: false + add :filename, :string + add :environment_slug, :string + add :tag, :string + add :path, :string + + timestamps(type: :utc_datetime) + end + + create unique_index(:files, [:slug]) + create unique_index(:files, [:environment_slug, :tag]) + end +end diff --git a/src/core/dbt-api/priv/repo/migrations/20250303161854_add_tag_to_manifests.exs b/src/core/dbt-api/priv/repo/migrations/20250303161854_add_tag_to_manifests.exs new file mode 100644 index 00000000..fafad459 --- /dev/null +++ b/src/core/dbt-api/priv/repo/migrations/20250303161854_add_tag_to_manifests.exs @@ -0,0 +1,11 @@ +defmodule Jade.Repo.Migrations.AddTagToManifests do + use Ecto.Migration + + def change do + alter table(:manifests) do + add :tag, :string + end + + create unique_index(:manifests, 
[:environment_slug, :tag]) + end +end diff --git a/src/core/dbt-api/priv/repo/migrations/20250329223055_allow_multifile_upload.exs b/src/core/dbt-api/priv/repo/migrations/20250329223055_allow_multifile_upload.exs new file mode 100644 index 00000000..869a6038 --- /dev/null +++ b/src/core/dbt-api/priv/repo/migrations/20250329223055_allow_multifile_upload.exs @@ -0,0 +1,13 @@ +defmodule :"Elixir.Jade.Repo.Migrations.Allow-multifile-upload" do + use Ecto.Migration + @disable_ddl_transaction true + @disable_migration_lock true + + def change do + drop_if_exists unique_index(:files, [:environment_slug, :tag]) + + create_if_not_exists index(:files, [:environment_slug, :filename, :tag]) + create_if_not_exists index(:files, [:environment_slug, :filename]) + create_if_not_exists index(:files, [:environment_slug, :tag]) + end +end diff --git a/src/core/dbt-api/priv/repo/seeds.exs b/src/core/dbt-api/priv/repo/seeds.exs new file mode 100644 index 00000000..a236946c --- /dev/null +++ b/src/core/dbt-api/priv/repo/seeds.exs @@ -0,0 +1,11 @@ +# Script for populating the database. You can run it as: +# +# mix run priv/repo/seeds.exs +# +# Inside the script, you can read and write to any of your +# repositories directly: +# +# Jade.Repo.insert!(%Jade.SomeSchema{}) +# +# We recommend using the bang functions (`insert!`, `update!` +# and so on) as they will fail if something goes wrong. diff --git a/src/core/dbt-api/priv/static/favicon-a8ca4e3a2bb8fea46a9ee9e102e7d3eb.ico b/src/core/dbt-api/priv/static/favicon-a8ca4e3a2bb8fea46a9ee9e102e7d3eb.ico new file mode 100644 index 00000000..73de524a Binary files /dev/null and b/src/core/dbt-api/priv/static/favicon-a8ca4e3a2bb8fea46a9ee9e102e7d3eb.ico differ diff --git a/src/core/dbt-api/priv/static/favicon.ico b/src/core/dbt-api/priv/static/favicon.ico new file mode 100644 index 00000000..73de524a Binary files /dev/null and b/src/core/dbt-api/priv/static/favicon.ico differ diff --git a/src/core/dbt-api/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg b/src/core/dbt-api/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg new file mode 100644 index 00000000..9f26baba --- /dev/null +++ b/src/core/dbt-api/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg @@ -0,0 +1,6 @@ + diff --git a/src/core/dbt-api/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg.gz b/src/core/dbt-api/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg.gz new file mode 100644 index 00000000..d8d1f38b Binary files /dev/null and b/src/core/dbt-api/priv/static/images/logo-06a11be1f2cdde2c851763d00bdd2e80.svg.gz differ diff --git a/src/core/dbt-api/priv/static/images/logo.svg b/src/core/dbt-api/priv/static/images/logo.svg new file mode 100644 index 00000000..9f26baba --- /dev/null +++ b/src/core/dbt-api/priv/static/images/logo.svg @@ -0,0 +1,6 @@ + diff --git a/src/core/dbt-api/priv/static/images/logo.svg.gz b/src/core/dbt-api/priv/static/images/logo.svg.gz new file mode 100644 index 00000000..d8d1f38b Binary files /dev/null and b/src/core/dbt-api/priv/static/images/logo.svg.gz differ diff --git a/src/core/dbt-api/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt b/src/core/dbt-api/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt new file mode 100644 index 00000000..26e06b5f --- /dev/null +++ b/src/core/dbt-api/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt @@ -0,0 +1,5 @@ +# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file +# +# To ban all spiders from the entire site uncomment the 
next two lines: +# User-agent: * +# Disallow: / diff --git a/src/core/dbt-api/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt.gz b/src/core/dbt-api/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt.gz new file mode 100644 index 00000000..a1d6ca87 Binary files /dev/null and b/src/core/dbt-api/priv/static/robots-9e2c81b0855bbff2baa8371bc4a78186.txt.gz differ diff --git a/src/core/dbt-api/priv/static/robots.txt b/src/core/dbt-api/priv/static/robots.txt new file mode 100644 index 00000000..26e06b5f --- /dev/null +++ b/src/core/dbt-api/priv/static/robots.txt @@ -0,0 +1,5 @@ +# See https://www.robotstxt.org/robotstxt.html for documentation on how to use the robots.txt file +# +# To ban all spiders from the entire site uncomment the next two lines: +# User-agent: * +# Disallow: / diff --git a/src/core/dbt-api/priv/static/robots.txt.gz b/src/core/dbt-api/priv/static/robots.txt.gz new file mode 100644 index 00000000..a1d6ca87 Binary files /dev/null and b/src/core/dbt-api/priv/static/robots.txt.gz differ diff --git a/src/core/dbt-api/run.sh b/src/core/dbt-api/run.sh new file mode 100755 index 00000000..0b236618 --- /dev/null +++ b/src/core/dbt-api/run.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Exit the script immediately if any command returns a non-zero exit status. +set -e + +if [ "$1" = "dev" ]; then + + echo "Starting in Dev Mode" + + export MIX_ENV="dev" + export PHX_SERVER="false" + export PHX_ENDPOINT="false" + + mix deps.get + iex -S mix phx.server + +elif [ "$1" = "shell" ]; then + + echo "Creating a Shell in the Production Server" + + iex --name "shell@127.0.0.1" --cookie $IEX_COOKIE --remsh "server@127.0.0.1" + +else + + echo "Starting in Prod Web Server Mode" + + echo "Connecting to the database with the following environment variables" + echo "Jade database:" + echo "DB_USER: ${DB_USER}" + echo "DB_PASS: hidden" + echo "DB_HOST: ${DB_HOST}" + echo "DB_NAME: ${DB_NAME}" + + echo "Datacoves database:" + echo "DATACOVES_DB_USER: ${DATACOVES_DB_USER}" + echo "DATACOVES_DB_PASS: hidden" + echo "DATACOVES_DB_HOST: ${DATACOVES_DB_HOST}" + echo "DATACOVES_DB_NAME: ${DATACOVES_DB_NAME}" + + export MIX_ENV="prod" + export PHX_SERVER="true" + export PHX_ENDPOINT="true" + + mix do ecto.create, ecto.migrate + elixir --name "server@127.0.0.1" --cookie $IEX_COOKIE -S mix phx.server + +fi diff --git a/src/core/dbt-api/test/airflow_repo/migrations/20231113150452_create_dags.exs b/src/core/dbt-api/test/airflow_repo/migrations/20231113150452_create_dags.exs new file mode 100644 index 00000000..3a28c680 --- /dev/null +++ b/src/core/dbt-api/test/airflow_repo/migrations/20231113150452_create_dags.exs @@ -0,0 +1,32 @@ +defmodule Airflow.Repo.Migrations.CreateDags do + use Ecto.Migration + + def change do + create table(:dag, primary_key: false) do + add :dag_id, :string, primary_key: true + add :default_view, :string + add :description, :string + add :fileloc, :string + add :has_import_errors, :boolean, default: false + add :has_task_concurrency_limits, :boolean + add :is_active, :boolean, default: true + add :is_paused, :boolean, default: false + add :is_subdag, :boolean, default: false + add :last_expired, :utc_datetime + add :last_parsed_time, :utc_datetime + add :last_pickled, :utc_datetime + add :max_active_runs, :integer + add :max_active_tasks, :integer + add :next_dagrun_create_after, :utc_datetime + add :next_dagrun_data_interval_end, :utc_datetime + add :next_dagrun_data_interval_start, :utc_datetime + add :next_dagrun, :utc_datetime + add :owners, :string + add :pickle_id, :integer + add 
:root_dag_id, :string + add :schedule_interval, :string + add :scheduler_lock, :boolean + add :timetable_description, :string + end + end +end diff --git a/src/core/dbt-api/test/airflow_repo/migrations/20231113150456_create_dag_runs.exs b/src/core/dbt-api/test/airflow_repo/migrations/20231113150456_create_dag_runs.exs new file mode 100644 index 00000000..5cf86cdc --- /dev/null +++ b/src/core/dbt-api/test/airflow_repo/migrations/20231113150456_create_dag_runs.exs @@ -0,0 +1,24 @@ +defmodule Airflow.Repo.Migrations.CreateDagRuns do + use Ecto.Migration + + def change do + create table(:dag_run) do + add :dag_id, :string + add :execution_date, :utc_datetime + add :state, :string + add :run_id, :string + add :external_trigger, :boolean + add :conf, :binary + add :end_date, :utc_datetime + add :start_date, :utc_datetime + add :run_type, :string + add :last_scheduling_decision, :utc_datetime + add :dag_hash, :string + add :creating_job_id, :integer + add :queued_at, :utc_datetime + add :data_interval_start, :utc_datetime + add :data_interval_end, :utc_datetime + add :log_template_id, :integer + end + end +end diff --git a/src/core/dbt-api/test/datacoves_repo/migrations/20231111085033_create_accounts.exs b/src/core/dbt-api/test/datacoves_repo/migrations/20231111085033_create_accounts.exs new file mode 100644 index 00000000..cc55a37f --- /dev/null +++ b/src/core/dbt-api/test/datacoves_repo/migrations/20231111085033_create_accounts.exs @@ -0,0 +1,26 @@ +defmodule Datacoves.Repo.Migrations.CreateAccounts do + use Ecto.Migration + + def change do + create table(:users_account) do + add :approve_billing_events, :boolean + add :cancelled_subscription, :map + add :created_at, :utc_datetime + add :created_by_id, :integer + add :customer_id, :string + add :deactivated_at, :utc_datetime + add :developer_licenses, :integer + add :name, :string + add :notifications_enabled, :map + add :plan_id, :integer + add :settings, :map + add :slug, :string + add :subscription_updated_at, :utc_datetime + add :subscription, :map + add :trial_ends_at, :utc_datetime + add :trial_started_at, :utc_datetime + add :updated_at, :utc_datetime + add :workers_execution_limit, :map + end + end +end diff --git a/src/core/dbt-api/test/datacoves_repo/migrations/20231111151248_create_projects.exs b/src/core/dbt-api/test/datacoves_repo/migrations/20231111151248_create_projects.exs new file mode 100644 index 00000000..fa99fbbd --- /dev/null +++ b/src/core/dbt-api/test/datacoves_repo/migrations/20231111151248_create_projects.exs @@ -0,0 +1,23 @@ +defmodule Datacoves.Repo.Migrations.CreateProjects do + use Ecto.Migration + + def change do + create table(:projects_project) do + add :ci_home_url, :string + add :ci_provider, :string + add :clone_strategy, :string + add :deploy_credentials, :binary + add :deploy_key_id, :integer + add :name, :string + add :release_branch, :string + add :repository_id, :integer + add :settings, :map + add :slug, :string + add :validated_at, :utc_datetime + add :created_at, :utc_datetime + add :updated_at, :utc_datetime + + add :account_id, references(:users_account) + end + end +end diff --git a/src/core/dbt-api/test/datacoves_repo/migrations/20231111153154_create_environments.exs b/src/core/dbt-api/test/datacoves_repo/migrations/20231111153154_create_environments.exs new file mode 100644 index 00000000..f48b2753 --- /dev/null +++ b/src/core/dbt-api/test/datacoves_repo/migrations/20231111153154_create_environments.exs @@ -0,0 +1,37 @@ +defmodule Datacoves.Repo.Migrations.CreateEnvironments do + use 
Ecto.Migration + + def change do + create table(:projects_environment) do + add :airbyte_config, :binary + add :airflow_config, :binary + add :cluster_id, :integer + add :project_id, references(:projects_project) + add :dbt_docs_config, :binary + add :dbt_home_path, :string + add :dbt_profiles_dir, :string + add :docker_config_secret_name, :string + add :docker_config, :binary + add :docker_registry, :string + add :internal_services, :map + add :minio_config, :binary + add :name, :string + add :pomerium_config, :binary + add :profile_id, :integer + add :quotas, :map + add :release_id, :integer + add :release_profile, :string + add :services, :map + add :settings, :map + add :slug, :string + add :superset_config, :binary + add :sync, :boolean + add :type, :string + add :update_strategy, :string + add :workspace_generation, :integer + + add :created_at, :utc_datetime + add :updated_at, :utc_datetime + end + end +end diff --git a/src/core/dbt-api/test/datacoves_repo/migrations/20231220185241_create_users.exs b/src/core/dbt-api/test/datacoves_repo/migrations/20231220185241_create_users.exs new file mode 100644 index 00000000..9336661f --- /dev/null +++ b/src/core/dbt-api/test/datacoves_repo/migrations/20231220185241_create_users.exs @@ -0,0 +1,21 @@ +defmodule Datacoves.Repo.Migrations.CreateUsers do + use Ecto.Migration + + def change do + create table(:users_user) do + add :eid, :binary_id + add :created_at, :utc_datetime + add :updated_at, :utc_datetime + add :password, :string + add :last_login, :utc_datetime + add :email, :string + add :name, :string + add :avatar, :string + add :deactivated_at, :utc_datetime + add :is_superuser, :boolean + add :settings, :map + add :is_service_account, :boolean + add :slug, :string + end + end +end diff --git a/src/core/dbt-api/test/datacoves_repo/migrations/20231220185325_create_auth_tokens.exs b/src/core/dbt-api/test/datacoves_repo/migrations/20231220185325_create_auth_tokens.exs new file mode 100644 index 00000000..222c634b --- /dev/null +++ b/src/core/dbt-api/test/datacoves_repo/migrations/20231220185325_create_auth_tokens.exs @@ -0,0 +1,12 @@ +defmodule Datacoves.Repo.Migrations.CreateAuthTokens do + use Ecto.Migration + + def change do + create table(:authtoken_token, primary_key: false) do + add :key, :string, primary_key: true + add :created, :utc_datetime + + add :user_id, references(:users_user, on_delete: :delete_all) + end + end +end diff --git a/src/core/dbt-api/test/datacoves_repo/migrations/20231220185436_create_permissions.exs b/src/core/dbt-api/test/datacoves_repo/migrations/20231220185436_create_permissions.exs new file mode 100644 index 00000000..6b941b25 --- /dev/null +++ b/src/core/dbt-api/test/datacoves_repo/migrations/20231220185436_create_permissions.exs @@ -0,0 +1,11 @@ +defmodule Datacoves.Repo.Migrations.CreatePermissions do + use Ecto.Migration + + def change do + create table(:auth_permission) do + add :name, :string + add :content_type_id, :integer + add :codename, :string + end + end +end diff --git a/src/core/dbt-api/test/datacoves_repo/migrations/20231220185549_create_user_permissions.exs b/src/core/dbt-api/test/datacoves_repo/migrations/20231220185549_create_user_permissions.exs new file mode 100644 index 00000000..c096c6bb --- /dev/null +++ b/src/core/dbt-api/test/datacoves_repo/migrations/20231220185549_create_user_permissions.exs @@ -0,0 +1,10 @@ +defmodule Datacoves.Repo.Migrations.CreateUserPermissions do + use Ecto.Migration + + def change do + create table(:users_user_user_permissions) do + add :user_id, 
references(:users_user) + add :permission_id, references(:auth_permission) + end + end +end diff --git a/src/core/dbt-api/test/datacoves_repo/migrations/20240308184748_create_groups.exs b/src/core/dbt-api/test/datacoves_repo/migrations/20240308184748_create_groups.exs new file mode 100644 index 00000000..aade4536 --- /dev/null +++ b/src/core/dbt-api/test/datacoves_repo/migrations/20240308184748_create_groups.exs @@ -0,0 +1,11 @@ +defmodule Datacoves.Repo.Migrations.CreateGroups do + use Ecto.Migration + + def change do + create table(:auth_group) do + add :name, :string + + add :user_id, references(:users_user) + end + end +end diff --git a/src/core/dbt-api/test/datacoves_repo/migrations/20240308184927_create_extended_groups.exs b/src/core/dbt-api/test/datacoves_repo/migrations/20240308184927_create_extended_groups.exs new file mode 100644 index 00000000..5ae54d17 --- /dev/null +++ b/src/core/dbt-api/test/datacoves_repo/migrations/20240308184927_create_extended_groups.exs @@ -0,0 +1,16 @@ +defmodule Datacoves.Repo.Migrations.CreateExtendedGroups do + use Ecto.Migration + + def change do + create table(:users_extendedgroup) do + add :name, :string + add :identity_groups, {:array, :string} + add :role, :string + + add :account_id, references(:users_account) + add :group_id, references(:auth_group) + add :environment_id, references(:projects_environment) + add :project_id, references(:projects_project) + end + end +end diff --git a/src/core/dbt-api/test/datacoves_repo/migrations/20240308185257_create_users_user_groups.exs b/src/core/dbt-api/test/datacoves_repo/migrations/20240308185257_create_users_user_groups.exs new file mode 100644 index 00000000..4f32b63a --- /dev/null +++ b/src/core/dbt-api/test/datacoves_repo/migrations/20240308185257_create_users_user_groups.exs @@ -0,0 +1,10 @@ +defmodule Datacoves.Repo.Migrations.CreateUsersUserGroups do + use Ecto.Migration + + def change do + create table(:users_user_groups) do + add :user_id, references(:users_user) + add :group_id, references(:auth_group) + end + end +end diff --git a/src/core/dbt-api/test/support/conn_case.ex b/src/core/dbt-api/test/support/conn_case.ex new file mode 100644 index 00000000..bcddeeca --- /dev/null +++ b/src/core/dbt-api/test/support/conn_case.ex @@ -0,0 +1,60 @@ +defmodule JadeWeb.ConnCase do + @moduledoc """ + This module defines the test case to be used by + tests that require setting up a connection. + + Such tests rely on `Phoenix.ConnTest` and also + import other functionality to make it easier + to build common data structures and query the data layer. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use JadeWeb.ConnCase, async: true`, although + this option is not recommended for other databases. 
+ """ + + use ExUnit.CaseTemplate + + using do + quote do + # The default endpoint for testing + @endpoint JadeWeb.Endpoint + + use JadeWeb, :verified_routes + + # Import conveniences for testing with connections + import Plug.Conn + import Phoenix.ConnTest + import JadeWeb.ConnCase + + import ExUnit.CaptureLog + + # Import OpenApiSpex Schema Assertions + import OpenApiSpex.TestAssertions + + # Import advanced Test Assertions + import Assertions + + # Import Factory functions + import Support.Factory + + # Import Test Helpers + import Support.Helpers + + import Jade.DataCase, only: [reload: 1, reload!: 1] + end + end + + setup tags do + Jade.DataCase.setup_sandbox(tags) + {:ok, conn: Phoenix.ConnTest.build_conn()} + end + + def put_bearer_token(conn, key) do + Plug.Conn.put_req_header(conn, "authorization", "Bearer #{key}") + end + + def api_spec(), do: JadeWeb.OpenApi.Spec.spec() +end diff --git a/src/core/dbt-api/test/support/data_case.ex b/src/core/dbt-api/test/support/data_case.ex new file mode 100644 index 00000000..986b6712 --- /dev/null +++ b/src/core/dbt-api/test/support/data_case.ex @@ -0,0 +1,79 @@ +defmodule Jade.DataCase do + @moduledoc """ + This module defines the setup for tests requiring + access to the application's data layer. + + You may define functions here to be used as helpers in + your tests. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use Jade.DataCase, async: true`, although + this option is not recommended for other databases. + """ + + use ExUnit.CaseTemplate + + using do + quote do + alias Jade.Repo + + import Ecto + import Ecto.Changeset + import Ecto.Query + import Jade.DataCase + + import ExUnit.CaptureLog + + # Import advanced Test Assertions + import Assertions + + # Import Factory functions + import Support.Factory + + # Import Test Helpers + import Support.Helpers + end + end + + setup tags do + Jade.DataCase.setup_sandbox(tags) + :ok + end + + @doc """ + Sets up the sandbox based on the test tags. + """ + def setup_sandbox(tags) do + jade_pid = Ecto.Adapters.SQL.Sandbox.start_owner!(Jade.Repo, shared: not tags[:async]) + + datacoves_pid = + Ecto.Adapters.SQL.Sandbox.start_owner!(Datacoves.Repo, shared: not tags[:async]) + + on_exit(fn -> + Ecto.Adapters.SQL.Sandbox.stop_owner(jade_pid) + Ecto.Adapters.SQL.Sandbox.stop_owner(datacoves_pid) + end) + end + + def reload!(struct), do: Jade.Repo.reload!(struct) + def reload(struct), do: Jade.Repo.reload(struct) + + @doc """ + A helper that transforms changeset errors into a map of messages. 
+ + assert {:error, changeset} = Accounts.create_user(%{password: "short"}) + assert "password is too short" in errors_on(changeset).password + assert %{password: ["password is too short"]} = errors_on(changeset) + + """ + def errors_on(changeset) do + Ecto.Changeset.traverse_errors(changeset, fn {message, opts} -> + Regex.replace(~r"%{(\w+)}", message, fn _, key -> + opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string() + end) + end) + end +end diff --git a/src/core/dbt-api/test/support/factories/airflow.ex b/src/core/dbt-api/test/support/factories/airflow.ex new file mode 100644 index 00000000..ee832594 --- /dev/null +++ b/src/core/dbt-api/test/support/factories/airflow.ex @@ -0,0 +1,88 @@ +defmodule Support.Factories.Airflow do + alias Airflow.Dags.Dag + alias Airflow.DagRuns.DagRun + + def insert(record, attrs \\ [], opts \\ []) + + def insert(:dag, attrs, _opts) do + %Dag{ + dag_id: "python_sample_dag_#{Enum.random(1..1000)}", + default_view: "graph", + description: "Sample python dag dbt run", + fileloc: "/opt/airflow/dags/repo/orchestrate/dags/python_sample_dag.py", + has_import_errors: false, + has_task_concurrency_limits: false, + is_active: true, + is_paused: false, + is_subdag: false, + last_expired: nil, + last_parsed_time: ~U[2023-10-26 14:14:57Z], + last_pickled: nil, + max_active_runs: 16, + max_active_tasks: 16, + next_dagrun_create_after: ~U[2024-01-01 00:00:00Z], + next_dagrun_data_interval_end: ~U[2024-01-01 00:00:00Z], + next_dagrun_data_interval_start: ~U[2023-01-01 00:00:00Z], + next_dagrun: ~U[2023-01-01 00:00:00Z], + owners: "airflow", + pickle_id: nil, + root_dag_id: nil, + schedule_interval: "0 0 1 */12 *", + scheduler_lock: nil, + timetable_description: "At 00:00 on day 1 of the month every 12 months" + } + |> merge_and_insert(attrs) + end + + def insert(:dag_run, attrs, _opts) do + %DagRun{ + dag_id: "sample_project_#{Enum.random(1..100)}", + execution_date: utc_datetime_now(), + state: :queued, + run_id: "manual__#{DateTime.utc_now()}", + external_trigger: true, + conf: <<128, 5, 125, 148, 46>>, + end_date: nil, + start_date: nil, + run_type: "manual", + last_scheduling_decision: nil, + dag_hash: "random-dag-hash", + creating_job_id: nil, + queued_at: utc_datetime_now(), + data_interval_start: gen_datetime(-2), + data_interval_end: gen_datetime(-1), + log_template_id: 1 + } + |> merge_and_insert(attrs) + end + + def insert_pair(record, attrs, opts \\ []), do: insert_list(2, record, attrs, opts) + + def insert_list(count, record, attrs, opts \\ []) do + Enum.map(1..count//1, fn _idx -> insert(record, attrs, opts) end) + end + + def params_for(_record, _attrs), do: raise("Not implemented") + + defp gen_datetime(shift, date \\ Date.utc_today()) do + date |> Date.add(shift) |> DateTime.new!(~T[12:00:00]) |> DateTime.truncate(:second) + end + + defp utc_datetime_now() do + DateTime.utc_now() |> DateTime.truncate(:second) + end + + defp merge_and_insert(struct, attrs) when is_list(attrs) do + merge_and_insert(struct, Map.new(attrs)) + end + + defp merge_and_insert(struct, attrs) do + {repo, attrs} = Map.pop(attrs, :repo) + struct = Map.merge(struct, attrs) + + Airflow.Repo.put_dynamic_repo(repo) + Ecto.Adapters.SQL.Sandbox.mode(Airflow.Repo, :manual) + Ecto.Adapters.SQL.Sandbox.checkout(Airflow.Repo) + Airflow.Repo.insert!(struct) + end +end diff --git a/src/core/dbt-api/test/support/factories/datacoves.ex b/src/core/dbt-api/test/support/factories/datacoves.ex new file mode 100644 index 00000000..5bcc9083 --- /dev/null +++ 
b/src/core/dbt-api/test/support/factories/datacoves.ex @@ -0,0 +1,145 @@ +defmodule Support.Factories.Datacoves do + use ExMachina.Ecto, repo: Datacoves.Repo + + def account_factory do + %Datacoves.Accounts.Account{ + created_at: ~U[2023-10-25 08:12:01Z], + updated_at: ~U[2023-10-25 08:12:01Z], + name: "Local", + settings: %{}, + deactivated_at: nil, + subscription: %{}, + subscription_updated_at: nil, + slug: "local", + created_by_id: 1, + plan_id: nil, + trial_ends_at: nil, + trial_started_at: nil, + customer_id: nil, + workers_execution_limit: %{"airbyte" => 36000, "airflow" => 36000}, + approve_billing_events: true, + notifications_enabled: %{"billing" => false, "cluster" => false}, + cancelled_subscription: %{}, + developer_licenses: 0 + } + end + + def user_factory do + %Datacoves.Users.User{ + eid: "b4046cab-d625-4294-9ba8-a109e4ad0c7f", + created_at: DateTime.utc_now(), + updated_at: DateTime.utc_now(), + password: "", + last_login: DateTime.utc_now(), + email: "test@test.com", + name: "Test McTester", + avatar: nil, + deactivated_at: nil, + is_superuser: true, + settings: %{}, + is_service_account: false, + slug: "test" + } + end + + def group_factory do + %Datacoves.Groups.Group{ + name: "some group" + } + end + + def extended_group_factory do + %Datacoves.Groups.ExtendedGroup{ + account: build(:account), + group: build(:group), + environment: build(:environment), + project: build(:project), + name: "Local Account Admin", + identity_groups: ["ADMIN-TEST"], + role: "account_admin" + } + end + + def permission_factory do + %Datacoves.Permissions.Permission{ + name: "test.com:env123|service:resource|write", + content_type_id: 1, + codename: "write_resource" + } + end + + def auth_token_factory do + key = for _ <- 0..39, into: "", do: <<Enum.random(?a..?z)>> + + %Datacoves.AuthTokens.AuthToken{ + key: key, + created: DateTime.utc_now() + } + end + + def project_factory do + %Datacoves.Projects.Project{ + account: build(:account), + repository_id: Enum.random(1..1000), + name: "Fake Project #{Enum.random(1..1000)}", + slug: "fake-project-#{Enum.random(1..1000)}", + release_branch: "main", + clone_strategy: "http_clone", + deploy_credentials: "gAAAAABl-very-long-string", + settings: %{}, + deploy_key_id: nil, + ci_home_url: nil, + ci_provider: nil, + validated_at: nil, + created_at: DateTime.utc_now(), + updated_at: DateTime.utc_now() + } + end + + def environment_factory do + %Datacoves.Environments.Environment{ + airbyte_config: "long-base64-string", + airflow_config: "long-base64-string", + cluster_id: 1, + dbt_docs_config: "long-base64-string", + dbt_home_path: "transform", + dbt_profiles_dir: "automate", + docker_config_secret_name: "docker-config-datacovesprivate", + docker_config: "long-base64-string", + docker_registry: "", + internal_services: %{"minio" => %{"enabled" => false}}, + minio_config: "long-base64-string", + name: "Development", + pomerium_config: "long-base64-string", + profile_id: 1, + quotas: %{}, + release_id: 55, + release_profile: "dbt-snowflake", + services: %{ + "airbyte" => %{"enabled" => false}, + "airflow" => %{"enabled" => false}, + "code-server" => %{ + "enabled" => true, + "unmet_preconditions" => [], + "valid" => true + }, + "dbt-docs" => %{ + "enabled" => true, + "unmet_preconditions" => [], + "valid" => true + }, + "superset" => %{"enabled" => false} + }, + settings: %{}, + slug: "dev123", + superset_config: "long-base64-string", + sync: true, + type: "dev", + update_strategy: "freezed", + workspace_generation: 1, + project: build(:project), + created_at: ~U[2023-11-07
13:47:14Z], + updated_at: ~U[2023-11-07 13:47:26Z] + } + end +end diff --git a/src/core/dbt-api/test/support/factories/jade.ex b/src/core/dbt-api/test/support/factories/jade.ex new file mode 100644 index 00000000..a4c02898 --- /dev/null +++ b/src/core/dbt-api/test/support/factories/jade.ex @@ -0,0 +1,45 @@ +defmodule Support.Factories.Jade do + use ExMachina.Ecto, repo: Jade.Repo + + alias Jade.Files.File + alias Jade.JobIds.JobId + alias Jade.JobRunIds.JobRunId + alias Jade.Manifests.Manifest + + def job_id_factory do + %JobId{ + environment_id: sequence(:dag_environment_id, & &1), + dag_id: sequence(:dag_id, &"python_sample_project_#{&1}") + } + end + + def job_run_id_factory do + %JobRunId{ + environment_id: sequence(:dag_run_environment_id, & &1), + dag_run_id: sequence(:dag_run_id, & &1) + } + end + + def manifest_factory do + %Manifest{ + account_id: sequence(:manifest_account_id, & &1), + environment_slug: "env123", + dag_id: "yaml_dbt_dag", + dag_run_id: 2, + dag_run_run_id: "manual__2023-12-02T09:49:46.105347+00:00", + job_run: build(:job_run_id) + } + end + + def file_factory do + tag = "sample_tag_#{Enum.random(1..100)}" + + %File{ + filename: "#{Ecto.UUID.generate()}.json", + tag: tag, + contents: "testtesttest", + environment_slug: "env123", + path: "/environment/env123/files/#{tag}" + } + end +end diff --git a/src/core/dbt-api/test/support/factory.ex b/src/core/dbt-api/test/support/factory.ex new file mode 100644 index 00000000..73976a1b --- /dev/null +++ b/src/core/dbt-api/test/support/factory.ex @@ -0,0 +1,47 @@ +defmodule Support.Factory do + alias Support.Factories.Airflow + alias Support.Factories.Datacoves + alias Support.Factories.Jade + + def insert(type, attrs \\ [], opts \\ []) do + factory = get_factory(type) + factory.insert(type, attrs, opts) + end + + def insert_pair(type, attrs \\ []) do + factory = get_factory(type) + factory.insert_pair(type, attrs) + end + + def insert_list(count, type, attrs \\ []) do + factory = get_factory(type) + factory.insert_list(count, type, attrs) + end + + def params_for(type, attrs \\ []) do + factory = get_factory(type) + factory.params_for(type, attrs) + end + + defp get_factory(type) do + cond do + type in [:token, :job_id, :job_run_id, :manifest, :file] -> + Jade + + type in [ + :account, + :user, + :permission, + :auth_token, + :project, + :environment, + :group, + :extended_group + ] -> + Datacoves + + type in [:dag, :dag_run] -> + Airflow + end + end +end diff --git a/src/core/dbt-api/test/support/fixtures/fail.txt b/src/core/dbt-api/test/support/fixtures/fail.txt new file mode 100644 index 00000000..57cbf9f5 --- /dev/null +++ b/src/core/dbt-api/test/support/fixtures/fail.txt @@ -0,0 +1 @@ +fail-upload \ No newline at end of file diff --git a/src/core/dbt-api/test/support/fixtures/manifest.json b/src/core/dbt-api/test/support/fixtures/manifest.json new file mode 100644 index 00000000..c7632780 --- /dev/null +++ b/src/core/dbt-api/test/support/fixtures/manifest.json @@ -0,0 +1,21803 @@ +{ + "metadata": { + "dbt_schema_version": "https://schemas.getdbt.com/dbt/manifest/v10.json", + "dbt_version": "1.6.9", + "generated_at": "2024-01-18T14:38:12.611300Z", + "invocation_id": "16236cd9-587b-4aae-8f1b-c05be3d9d994", + "env": {}, + "project_name": "balboa", + "project_id": "84e0991a380d2a451e9a7787e56e2b53", + "user_id": "eab69edc-2ab8-4870-a9ca-fa38fd149f54", + "send_anonymous_usage_stats": true, + "adapter_type": "snowflake" + }, + "nodes": { + "model.balboa.credits_total": { + "database": "BALBOA_DEV", + "schema": "gomezn", + 
"name": "credits_total", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/credits_total.sql", + "original_file_path": "models/L2_bays/snowflake_usage/credits_total.sql", + "unique_id": "model.balboa.credits_total", + "fqn": ["balboa", "L2_bays", "snowflake_usage", "credits_total"], + "alias": "credits_total", + "checksum": { + "name": "sha256", + "checksum": "672c42b55164fce34d1501b0b2c137936ba18781ce3013aa19942da0bca9658f" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains the total number of credits used over the last 12 months", + "columns": { + "credits_used": { + "name": "credits_used", + "description": "The total number of credits used during the specified period", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "start_time": { + "name": "start_time", + "description": "The start time for the period during which credits were used", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "rank": { + "name": "rank", + "description": "The rank of the specified period in descending order of credits used", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "day_name": { + "name": "day_name", + "description": "The day of the week for the start time of the specified period", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "tod": { + "name": "tod", + "description": "The time of day for the start time of the specified period", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/credits_total.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.461762, + "relation_name": "BALBOA_DEV.gomezn.credits_total", + "raw_code": "select\n credits_used,\n start_time,\n case\n when dayname(start_time) like 'Mon' then 1\n when dayname(start_time) like 'Tue' then 2\n when dayname(start_time) like 'Wed' then 3\n when dayname(start_time) like 'Thu' then 
4\n when dayname(start_time) like 'Fri' then 5\n when dayname(start_time) like 'Sat' then 6\n when dayname(start_time) like 'Sun' then 7\n end as rank,\n dayname(start_time) as day_name,\n hour(start_time) as tod\nfrom\n {{ ref('int_warehouse_metering_history') }}", + "language": "sql", + "refs": [ + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + }, + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.int_warehouse_metering_history"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/credits_total.sql", + "compiled": true, + "compiled_code": "select\n credits_used,\n start_time,\n case\n when dayname(start_time) like 'Mon' then 1\n when dayname(start_time) like 'Tue' then 2\n when dayname(start_time) like 'Wed' then 3\n when dayname(start_time) like 'Thu' then 4\n when dayname(start_time) like 'Fri' then 5\n when dayname(start_time) like 'Sat' then 6\n when dayname(start_time) like 'Sun' then 7\n end as rank,\n dayname(start_time) as day_name,\n hour(start_time) as tod\nfrom\n BALBOA_DEV.gomezn.int_warehouse_metering_history", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.storage_usage_m": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "storage_usage_m", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/storage_usage_m.sql", + "original_file_path": "models/L2_bays/snowflake_usage/storage_usage_m.sql", + "unique_id": "model.balboa.storage_usage_m", + "fqn": ["balboa", "L2_bays", "snowflake_usage", "storage_usage_m"], + "alias": "storage_usage_m", + "checksum": { + "name": "sha256", + "checksum": "bba89c175f8fcfc83ba8061d1f25d54482e8f1cfac26664fb27e312c19093513" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains data on storage usage by month for the last 12 months", + "columns": { + "usage_month": { + "name": "usage_month", + "description": "The month (1-12) for which storage usage is being reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "storage": { + "name": "storage", + "description": "The total amount of storage used in the given month, in bytes", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + 
"docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/storage_usage_m.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.462434, + "relation_name": "BALBOA_DEV.gomezn.storage_usage_m", + "raw_code": "select\n sum(total_billable_storage_tb) as storage,\n usage_month\nfrom {{ ref('int_storage_usage') }}\nwhere\n datediff(month, usage_month, current_date) <= 12 and datediff(month, usage_month, current_date) >= 1\ngroup by usage_month\norder by usage_month", + "language": "sql", + "refs": [ + { "name": "int_storage_usage", "package": null, "version": null }, + { "name": "int_storage_usage", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.int_storage_usage"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/storage_usage_m.sql", + "compiled": true, + "compiled_code": "select\n sum(total_billable_storage_tb) as storage,\n usage_month\nfrom BALBOA_DEV.gomezn.int_storage_usage\nwhere\n datediff(month, usage_month, current_date) <= 12 and datediff(month, usage_month, current_date) >= 1\ngroup by usage_month\norder by usage_month", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.credits_by_month": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "credits_by_month", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/credits_by_month.sql", + "original_file_path": "models/L2_bays/snowflake_usage/credits_by_month.sql", + "unique_id": "model.balboa.credits_by_month", + "fqn": ["balboa", "L2_bays", "snowflake_usage", "credits_by_month"], + "alias": "credits_by_month", + "checksum": { + "name": "sha256", + "checksum": "068dfe881cf075f732ac05a0e14c35dffba279c0239bb37a3560f8cadc68aead" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains credit usage by month for the last 12 months", + "columns": { + "month_n": 
{ + "name": "month_n", + "description": "The month (1-12) for which credit usage is being reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "cumulative_sum": { + "name": "cumulative_sum", + "description": "The cumulative sum of credit usage up to and including the specified month", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/credits_by_month.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.463077, + "relation_name": "BALBOA_DEV.gomezn.credits_by_month", + "raw_code": "with credits_by_month as (\n select\n date_trunc(month, start_time) as month_n,\n sum(credits_used) as monthly_credits\n from {{ ref('int_warehouse_metering_history') }}\n where\n datediff(month, start_time, current_date) >= 1\n group by month_n\n order by month_n asc\n)\n\nselect\n month_n,\n sum(monthly_credits) over (order by month_n asc rows between unbounded preceding and current row) as cumulative_sum\nfrom\n credits_by_month\norder by month_n asc", + "language": "sql", + "refs": [ + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + }, + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.int_warehouse_metering_history"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/credits_by_month.sql", + "compiled": true, + "compiled_code": "with credits_by_month as (\n select\n date_trunc(month, start_time) as month_n,\n sum(credits_used) as monthly_credits\n from BALBOA_DEV.gomezn.int_warehouse_metering_history\n where\n datediff(month, start_time, current_date) >= 1\n group by month_n\n order by month_n asc\n)\n\nselect\n month_n,\n sum(monthly_credits) over (order by month_n asc rows between unbounded preceding and current row) as cumulative_sum\nfrom\n credits_by_month\norder by month_n asc", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.credits_mtd": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "credits_mtd", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/credits_mtd.sql", + "original_file_path": "models/L2_bays/snowflake_usage/credits_mtd.sql", + "unique_id": "model.balboa.credits_mtd", + "fqn": ["balboa", "L2_bays", "snowflake_usage", "credits_mtd"], + "alias": "credits_mtd", + "checksum": { + "name": "sha256", + "checksum": "050e49f35e44ba1d618e90052ea9eaeb1ab24c7e20c74112403b0982f2a40e31" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + 
"group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains credits used month to date", + "columns": { + "mtd_credits_used": { + "name": "mtd_credits_used", + "description": "The number of credits used so far in the current month", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "previous_mtd_credits_used": { + "name": "previous_mtd_credits_used", + "description": "The number of credits used in the previous month up to the same date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/credits_mtd.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.4596841, + "relation_name": "BALBOA_DEV.gomezn.credits_mtd", + "raw_code": "select\n credits_used as mtd_credits_used,\n (\n select sum(credits_used) as credits_used_sum\n from\n {{ ref('int_warehouse_metering_history') }}\n where\n timestampdiff(month, start_time, current_date) = 1\n and day(current_date) >= day(start_time)\n ) as previous_mtd_credits_used\nfrom\n {{ ref('int_warehouse_metering_history') }}\nwhere\n timestampdiff(month, start_time, current_date) = 0", + "language": "sql", + "refs": [ + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + }, + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + }, + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + }, + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.int_warehouse_metering_history"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/credits_mtd.sql", + "compiled": true, + "compiled_code": "select\n credits_used as mtd_credits_used,\n (\n select sum(credits_used) as credits_used_sum\n from\n BALBOA_DEV.gomezn.int_warehouse_metering_history\n where\n timestampdiff(month, start_time, current_date) = 1\n and day(current_date) >= day(start_time)\n ) as previous_mtd_credits_used\nfrom\n BALBOA_DEV.gomezn.int_warehouse_metering_history\nwhere\n 
timestampdiff(month, start_time, current_date) = 0", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.credits_by_warehouse": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "credits_by_warehouse", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/credits_by_warehouse.sql", + "original_file_path": "models/L2_bays/snowflake_usage/credits_by_warehouse.sql", + "unique_id": "model.balboa.credits_by_warehouse", + "fqn": ["balboa", "L2_bays", "snowflake_usage", "credits_by_warehouse"], + "alias": "credits_by_warehouse", + "checksum": { + "name": "sha256", + "checksum": "3265d64ac96c32805594c7d8efcca41fc71a8469b320888543532c861ab34b3d" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains credits used by warehouse", + "columns": { + "start_time": { + "name": "start_time", + "description": "The start time for the period during which credits were used", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "credits_used": { + "name": "credits_used", + "description": "The number of credits used during the specified period", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_name": { + "name": "warehouse_name", + "description": "The name of the warehouse where credits were used", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/credits_by_warehouse.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.460809, + "relation_name": "BALBOA_DEV.gomezn.credits_by_warehouse", + "raw_code": "select\n start_time,\n credits_used,\n warehouse_name\nfrom\n {{ ref('int_warehouse_metering_history') }}\nwhere\n datediff(month, start_time, current_date) >= 1", + "language": "sql", + "refs": [ + { + "name": 
"int_warehouse_metering_history", + "package": null, + "version": null + }, + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.int_warehouse_metering_history"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/credits_by_warehouse.sql", + "compiled": true, + "compiled_code": "select\n start_time,\n credits_used,\n warehouse_name\nfrom\n BALBOA_DEV.gomezn.int_warehouse_metering_history\nwhere\n datediff(month, start_time, current_date) >= 1", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.credits_variance": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "credits_variance", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/credits_variance.sql", + "original_file_path": "models/L2_bays/snowflake_usage/credits_variance.sql", + "unique_id": "model.balboa.credits_variance", + "fqn": ["balboa", "L2_bays", "snowflake_usage", "credits_variance"], + "alias": "credits_variance", + "checksum": { + "name": "sha256", + "checksum": "79684aaaa7ca610b1bc5caa61b5e09741ecfc9111bc83bb24b34dcb308916633" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains the variance in credits usage", + "columns": { + "month_n": { + "name": "month_n", + "description": "The current month (1-12) for which credit usage is being reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "month_d": { + "name": "month_d", + "description": "The previous month (1-12) for which credit usage is being reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "prev_monthly_credits": { + "name": "prev_monthly_credits", + "description": "The total number of credits used in the previous month", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "prev_month": { + "name": "prev_month", + "description": "The name of the previous month", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "monthly_credits": { + "name": "monthly_credits", + "description": "The total number of credits used in the current month", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "difference": { + "name": "difference", + "description": 
"The difference in credits used between the current and previous months", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "variance": { + "name": "variance", + "description": "The percentage variance in credits used between the current and previous months", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/credits_variance.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.4671109, + "relation_name": "BALBOA_DEV.gomezn.credits_variance", + "raw_code": "with this_month as (\n select\n case\n when timestampdiff(month, start_time, current_date) = 1 then 1\n when timestampdiff(month, start_time, current_date) = 2 then 2\n when timestampdiff(month, start_time, current_date) = 3 then 3\n when timestampdiff(month, start_time, current_date) = 4 then 4\n when timestampdiff(month, start_time, current_date) = 5 then 5\n when timestampdiff(month, start_time, current_date) = 6 then 6\n when timestampdiff(month, start_time, current_date) = 7 then 7\n when timestampdiff(month, start_time, current_date) = 8 then 8\n when timestampdiff(month, start_time, current_date) = 9 then 9\n when timestampdiff(month, start_time, current_date) = 10 then 10\n when timestampdiff(month, start_time, current_date) = 11 then 11\n when timestampdiff(month, start_time, current_date) = 12 then 12\n when timestampdiff(month, start_time, current_date) = 13 then 13\n end as month_n,\n date_trunc(month, start_time) as month_d,\n sum(credits_used) as monthly_credits\n from\n {{ ref('int_warehouse_metering_history') }}\n where\n start_time >= dateadd(month, -13, date_trunc(month, current_date))\n and start_time < date_trunc(month, current_date)\n group by\n month_d, month_n\n order by\n month_d asc\n),\n\nprev_month as (\n select\n case\n when timestampdiff(month, start_time, current_date) = 1 then 0\n when timestampdiff(month, start_time, current_date) = 2 then 1\n when timestampdiff(month, start_time, current_date) = 3 then 2\n when timestampdiff(month, start_time, current_date) = 4 then 3\n when timestampdiff(month, start_time, current_date) = 5 then 4\n when timestampdiff(month, start_time, current_date) = 6 then 5\n when timestampdiff(month, start_time, current_date) = 7 then 6\n when timestampdiff(month, start_time, current_date) = 8 then 7\n when timestampdiff(month, start_time, current_date) = 9 then 8\n when timestampdiff(month, start_time, current_date) = 10 then 9\n when timestampdiff(month, start_time, current_date) = 11 then 10\n when timestampdiff(month, start_time, current_date) = 12 then 11\n when timestampdiff(month, start_time, current_date) = 13 then 12\n end as prev_month,\n date_trunc(month, start_time) as prev_month_d,\n sum(credits_used) as prev_monthly_credits\n from\n {{ ref('int_warehouse_metering_history') }}\n where\n start_time >= dateadd(month, -13, date_trunc(month, current_date))\n and 
start_time < date_trunc(month, current_date)\n group by\n prev_month_d, prev_month\n order by\n prev_month_d asc\n)\n\nselect\n this_month.month_n,\n this_month.month_d,\n prev_month.prev_month,\n this_month.monthly_credits,\n prev_month.prev_monthly_credits,\n this_month.monthly_credits - prev_month.prev_monthly_credits as difference,\n sum(difference) over (order by this_month.month_n desc rows between unbounded preceding and current row) as variance\nfrom this_month\nleft join prev_month on this_month.month_n = prev_month.prev_month", + "language": "sql", + "refs": [ + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + }, + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + }, + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + }, + { + "name": "int_warehouse_metering_history", + "package": null, + "version": null + } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.int_warehouse_metering_history"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/credits_variance.sql", + "compiled": true, + "compiled_code": "with this_month as (\n select\n case\n when timestampdiff(month, start_time, current_date) = 1 then 1\n when timestampdiff(month, start_time, current_date) = 2 then 2\n when timestampdiff(month, start_time, current_date) = 3 then 3\n when timestampdiff(month, start_time, current_date) = 4 then 4\n when timestampdiff(month, start_time, current_date) = 5 then 5\n when timestampdiff(month, start_time, current_date) = 6 then 6\n when timestampdiff(month, start_time, current_date) = 7 then 7\n when timestampdiff(month, start_time, current_date) = 8 then 8\n when timestampdiff(month, start_time, current_date) = 9 then 9\n when timestampdiff(month, start_time, current_date) = 10 then 10\n when timestampdiff(month, start_time, current_date) = 11 then 11\n when timestampdiff(month, start_time, current_date) = 12 then 12\n when timestampdiff(month, start_time, current_date) = 13 then 13\n end as month_n,\n date_trunc(month, start_time) as month_d,\n sum(credits_used) as monthly_credits\n from\n BALBOA_DEV.gomezn.int_warehouse_metering_history\n where\n start_time >= dateadd(month, -13, date_trunc(month, current_date))\n and start_time < date_trunc(month, current_date)\n group by\n month_d, month_n\n order by\n month_d asc\n),\n\nprev_month as (\n select\n case\n when timestampdiff(month, start_time, current_date) = 1 then 0\n when timestampdiff(month, start_time, current_date) = 2 then 1\n when timestampdiff(month, start_time, current_date) = 3 then 2\n when timestampdiff(month, start_time, current_date) = 4 then 3\n when timestampdiff(month, start_time, current_date) = 5 then 4\n when timestampdiff(month, start_time, current_date) = 6 then 5\n when timestampdiff(month, start_time, current_date) = 7 then 6\n when timestampdiff(month, start_time, current_date) = 8 then 7\n when timestampdiff(month, start_time, current_date) = 9 then 8\n when timestampdiff(month, start_time, current_date) = 10 then 9\n when timestampdiff(month, start_time, current_date) = 11 then 10\n when timestampdiff(month, start_time, current_date) = 12 then 11\n when timestampdiff(month, start_time, current_date) = 13 then 12\n end as prev_month,\n date_trunc(month, start_time) as prev_month_d,\n sum(credits_used) as prev_monthly_credits\n from\n BALBOA_DEV.gomezn.int_warehouse_metering_history\n where\n start_time >= 
dateadd(month, -13, date_trunc(month, current_date))\n and start_time < date_trunc(month, current_date)\n group by\n prev_month_d, prev_month\n order by\n prev_month_d asc\n)\n\nselect\n this_month.month_n,\n this_month.month_d,\n prev_month.prev_month,\n this_month.monthly_credits,\n prev_month.prev_monthly_credits,\n this_month.monthly_credits - prev_month.prev_monthly_credits as difference,\n sum(difference) over (order by this_month.month_n desc rows between unbounded preceding and current row) as variance\nfrom this_month\nleft join prev_month on this_month.month_n = prev_month.prev_month", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.query_utilization": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "query_utilization", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/query_utilization.sql", + "original_file_path": "models/L2_bays/snowflake_usage/query_utilization.sql", + "unique_id": "model.balboa.query_utilization", + "fqn": ["balboa", "L2_bays", "snowflake_usage", "query_utilization"], + "alias": "query_utilization", + "checksum": { + "name": "sha256", + "checksum": "33bb4c3a7ad4e61980f9ca589449f7a57a4a76c4515ea180317dd62d3b1c7f76" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains the final data on query utilization", + "columns": { + "query_id": { + "name": "query_id", + "description": "The ID of the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "database_name": { + "name": "database_name", + "description": "The name of the database in which the query was run", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "schema_name": { + "name": "schema_name", + "description": "The name of the schema in which the query was run", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_name": { + "name": "warehouse_name", + "description": "The name of the warehouse on which the query was run", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "query_time": { + "name": "query_time", + "description": "The total time taken by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "role": { + "name": "role", + "description": "The role of the user running the query", + "meta": {}, + "data_type": null, + "constraints": 
[], + "quote": null, + "tags": [] + }, + "user_name": { + "name": "user_name", + "description": "The name of the user running the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "query_status": { + "name": "query_status", + "description": "The status of the query (success/failure)", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "start_time": { + "name": "start_time", + "description": "The start time of the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "query_fail_percentage": { + "name": "query_fail_percentage", + "description": "The percentage of failed queries out of the total queries run", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "avg_queries_per_user": { + "name": "avg_queries_per_user", + "description": "The average number of queries run per user", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/query_utilization.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.465705, + "relation_name": "BALBOA_DEV.gomezn.query_utilization", + "raw_code": "with query_fail as (\n select count_if(query_status like 'FAIL') / count(query_status) * 100 as query_fail_percentage\n from {{ ref('int_query_history') }}\n),\n\nqueries_per_user as (\n select count(query_id) / count(distinct user_name) as queries\n from {{ ref('int_query_history') }}\n)\nselect\n query_id,\n database_name,\n schema_name,\n warehouse_name,\n query_time,\n role,\n user_name,\n query_status,\n start_time,\n (select * from query_fail) as query_fail_percentage,\n (select * from queries_per_user) as avg_queries_per_user\nfrom {{ ref('int_query_history') }}", + "language": "sql", + "refs": [ + { "name": "int_query_history", "package": null, "version": null }, + { "name": "int_query_history", "package": null, "version": null }, + { "name": "int_query_history", "package": null, "version": null }, + { "name": "int_query_history", "package": null, "version": null }, + { "name": "int_query_history", "package": null, "version": null }, + { "name": "int_query_history", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.int_query_history"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/query_utilization.sql", + "compiled": true, + "compiled_code": "with query_fail as (\n select count_if(query_status like 'FAIL') / count(query_status) * 100 as query_fail_percentage\n from BALBOA_DEV.gomezn.int_query_history\n),\n\nqueries_per_user as (\n select count(query_id) / count(distinct user_name) as queries\n from BALBOA_DEV.gomezn.int_query_history\n)\nselect\n query_id,\n database_name,\n schema_name,\n warehouse_name,\n query_time,\n role,\n 
user_name,\n query_status,\n start_time,\n (select * from query_fail) as query_fail_percentage,\n (select * from queries_per_user) as avg_queries_per_user\nfrom BALBOA_DEV.gomezn.int_query_history", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.int_pipe_usage_history": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "int_pipe_usage_history", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/intermediate/int_pipe_usage_history.sql", + "original_file_path": "models/L2_bays/snowflake_usage/intermediate/int_pipe_usage_history.sql", + "unique_id": "model.balboa.int_pipe_usage_history", + "fqn": [ + "balboa", + "L2_bays", + "snowflake_usage", + "intermediate", + "int_pipe_usage_history" + ], + "alias": "int_pipe_usage_history", + "checksum": { + "name": "sha256", + "checksum": "faf91bb5ffc8f6989ce5d35aacab8b7e95fff50603d483526dad8e9dc75c25d9" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "pipe usage history staging model", + "columns": { + "pipe_id": { + "name": "pipe_id", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "pipe_name": { + "name": "pipe_name", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "start_time": { + "name": "start_time", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "end_time": { + "name": "end_time", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "credits_used": { + "name": "credits_used", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bytes_inserted": { + "name": "bytes_inserted", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "files_inserted": { + "name": "files_inserted", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "start_date": { + "name": "start_date", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "pipeline_operation_hours": { + "name": "pipeline_operation_hours", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "time_of_day": { + "name": 
"time_of_day", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/intermediate/int_pipe_usage_history.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.469976, + "relation_name": "BALBOA_DEV.gomezn.int_pipe_usage_history", + "raw_code": "select\n pipe_id,\n pipe_name,\n start_time,\n end_time,\n credits_used,\n bytes_inserted,\n files_inserted,\n to_date(start_time) as start_date,\n datediff(hour, start_time, end_time) as pipeline_operation_hours,\n hour(start_time) as time_of_day\nfrom {{ ref('pipe_usage_history') }}\norder by to_date(start_time) desc", + "language": "sql", + "refs": [ + { "name": "pipe_usage_history", "package": null, "version": null }, + { "name": "pipe_usage_history", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.pipe_usage_history"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/intermediate/int_pipe_usage_history.sql", + "compiled": true, + "compiled_code": "select\n pipe_id,\n pipe_name,\n start_time,\n end_time,\n credits_used,\n bytes_inserted,\n files_inserted,\n to_date(start_time) as start_date,\n datediff(hour, start_time, end_time) as pipeline_operation_hours,\n hour(start_time) as time_of_day\nfrom BALBOA_DEV.gomezn.pipe_usage_history\norder by to_date(start_time) desc", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.int_storage_usage": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "int_storage_usage", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/intermediate/int_storage_usage.sql", + "original_file_path": "models/L2_bays/snowflake_usage/intermediate/int_storage_usage.sql", + "unique_id": "model.balboa.int_storage_usage", + "fqn": [ + "balboa", + "L2_bays", + "snowflake_usage", + "intermediate", + "int_storage_usage" + ], + "alias": "int_storage_usage", + "checksum": { + "name": "sha256", + "checksum": "3769a7c40ef43bf07d143784348846aa8a5fda1b161fd95d84590f490452a17d" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ 
dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "storage usage staging model", + "columns": { + "usage_month": { + "name": "usage_month", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_billable_storage_tb": { + "name": "total_billable_storage_tb", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "storage_billable_storage_tb": { + "name": "storage_billable_storage_tb", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "stage_billable_storage_tb": { + "name": "stage_billable_storage_tb", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "failsafe_billable_storage_tb": { + "name": "failsafe_billable_storage_tb", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/intermediate/int_storage_usage.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.4712949, + "relation_name": "BALBOA_DEV.gomezn.int_storage_usage", + "raw_code": "select\n date_trunc(month, usage_date) as usage_month,\n avg(storage_bytes + stage_bytes + failsafe_bytes) / power(1024, 4) as total_billable_storage_tb,\n avg(storage_bytes) / power(1024, 4) as storage_billable_storage_tb,\n avg(stage_bytes) / power(1024, 4) as stage_billable_storage_tb,\n avg(failsafe_bytes) / power(1024, 4) as failsafe_billable_storage_tb\nfrom {{ ref('storage_usage') }}\ngroup by date_trunc(month, usage_date)\norder by date_trunc(month, usage_date)", + "language": "sql", + "refs": [ + { "name": "storage_usage", "package": null, "version": null }, + { "name": "storage_usage", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.storage_usage"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/intermediate/int_storage_usage.sql", + "compiled": true, + "compiled_code": "select\n date_trunc(month, usage_date) as usage_month,\n avg(storage_bytes + stage_bytes + failsafe_bytes) / power(1024, 4) as total_billable_storage_tb,\n avg(storage_bytes) / power(1024, 4) as storage_billable_storage_tb,\n avg(stage_bytes) / power(1024, 4) as stage_billable_storage_tb,\n avg(failsafe_bytes) / power(1024, 4) as failsafe_billable_storage_tb\nfrom BALBOA_DEV.gomezn.storage_usage\ngroup by date_trunc(month, usage_date)\norder by date_trunc(month, usage_date)", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": 
false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.int_warehouse_metering_history": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "int_warehouse_metering_history", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/intermediate/int_warehouse_metering_history.sql", + "original_file_path": "models/L2_bays/snowflake_usage/intermediate/int_warehouse_metering_history.sql", + "unique_id": "model.balboa.int_warehouse_metering_history", + "fqn": [ + "balboa", + "L2_bays", + "snowflake_usage", + "intermediate", + "int_warehouse_metering_history" + ], + "alias": "int_warehouse_metering_history", + "checksum": { + "name": "sha256", + "checksum": "2fd8829d56d831aca4c74972e55521aa111cc3ea4f19715564d211c701a55105" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "warehouse metering history staging model", + "columns": { + "start_time": { + "name": "start_time", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "end_time": { + "name": "end_time", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_id": { + "name": "warehouse_id", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_name": { + "name": "warehouse_name", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "credits_used": { + "name": "credits_used", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "start_date": { + "name": "start_date", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_operation_hours": { + "name": "warehouse_operation_hours", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "time_of_day": { + "name": "time_of_day", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/intermediate/int_warehouse_metering_history.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ 
dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.472453, + "relation_name": "BALBOA_DEV.gomezn.int_warehouse_metering_history", + "raw_code": "select\n start_time,\n end_time,\n warehouse_id,\n warehouse_name,\n credits_used,\n month(start_time) as start_date,\n datediff(hour, start_time, end_time) as warehouse_operation_hours,\n hour(start_time) as time_of_day\nfrom {{ ref('warehouse_metering_history') }}", + "language": "sql", + "refs": [ + { + "name": "warehouse_metering_history", + "package": null, + "version": null + }, + { + "name": "warehouse_metering_history", + "package": null, + "version": null + } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.warehouse_metering_history"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/intermediate/int_warehouse_metering_history.sql", + "compiled": true, + "compiled_code": "select\n start_time,\n end_time,\n warehouse_id,\n warehouse_name,\n credits_used,\n month(start_time) as start_date,\n datediff(hour, start_time, end_time) as warehouse_operation_hours,\n hour(start_time) as time_of_day\nfrom BALBOA_DEV.gomezn.warehouse_metering_history", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.int_query_history": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "int_query_history", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/snowflake_usage/intermediate/int_query_history.sql", + "original_file_path": "models/L2_bays/snowflake_usage/intermediate/int_query_history.sql", + "unique_id": "model.balboa.int_query_history", + "fqn": [ + "balboa", + "L2_bays", + "snowflake_usage", + "intermediate", + "int_query_history" + ], + "alias": "int_query_history", + "checksum": { + "name": "sha256", + "checksum": "75c9e574351e2df2db5741ad41e7199042e427ccf2ebc901dccc663a4b471730" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_snowflake_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "staging model for query history", + "columns": { + "query_id": { + "name": "query_id", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "database_name": { + 
"name": "database_name", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "schema_name": { + "name": "schema_name", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_name": { + "name": "warehouse_name", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "query_time": { + "name": "query_time", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "role": { + "name": "role", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "user_name": { + "name": "user_name", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "query_status": { + "name": "query_status", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "start_time": { + "name": "start_time", + "description": "", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/snowflake_usage/intermediate/int_query_history.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_snowflake_usage", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.467672, + "relation_name": "BALBOA_DEV.gomezn.int_query_history", + "raw_code": "select\n query_id,\n database_name,\n schema_name,\n warehouse_name,\n total_elapsed_time as query_time,\n role_name as role,\n user_name,\n execution_status as query_status,\n start_time\nfrom {{ ref('query_history') }}", + "language": "sql", + "refs": [ + { "name": "query_history", "package": null, "version": null }, + { "name": "query_history", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.query_history"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/snowflake_usage/intermediate/int_query_history.sql", + "compiled": true, + "compiled_code": "select\n query_id,\n database_name,\n schema_name,\n warehouse_name,\n total_elapsed_time as query_time,\n role_name as role,\n user_name,\n execution_status as query_status,\n start_time\nfrom BALBOA_DEV.gomezn.query_history", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.current_population": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "current_population", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/country_demographics/current_population.sql", + "original_file_path": "models/L2_bays/country_demographics/current_population.sql", + "unique_id": "model.balboa.current_population", + "fqn": [ + "balboa", + 
"L2_bays", + "country_demographics", + "current_population" + ], + "alias": "current_population", + "checksum": { + "name": "sha256", + "checksum": "3ba64f6490e17dd98394401db00d55d8699f3668c3ca794e85c4965bc80965c3" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_country_demographics", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Current Population by Country", + "columns": { + "country_code": { + "name": "country_code", + "description": "3 Letter Country Code", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "country_name": { + "name": "country_name", + "description": "Name of the country", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "value": { + "name": "value", + "description": "Total population for the country", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "year": { + "name": "year", + "description": "Year population was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/country_demographics/current_population.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_country_demographics", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.474257, + "relation_name": "BALBOA_DEV.gomezn.current_population", + "raw_code": "with population_rank as (\n select\n country_code,\n country_name,\n value,\n year,\n rank() over (\n partition by country_code, country_name order by year desc\n ) as rank_years\n from {{ ref('country_populations') }}\n)\n\nselect\n country_code,\n country_name,\n value,\n year\nfrom population_rank\nwhere\n rank_years = 1\n and year > 2017", + "language": "sql", + "refs": [ + { "name": "country_populations", "package": null, "version": null }, + { "name": "country_populations", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.country_populations"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/country_demographics/current_population.sql", + "compiled": true, + "compiled_code": "with population_rank as (\n 
select\n country_code,\n country_name,\n value,\n year,\n rank() over (\n partition by country_code, country_name order by year desc\n ) as rank_years\n from BALBOA_DEV.gomezn.country_populations\n)\n\nselect\n country_code,\n country_name,\n value,\n year\nfrom population_rank\nwhere\n rank_years = 1\n and year > 2017", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.covid_location": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "covid_location", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/covid_observations/covid_location.sql", + "original_file_path": "models/L2_bays/covid_observations/covid_location.sql", + "unique_id": "model.balboa.covid_location", + "fqn": ["balboa", "L2_bays", "covid_observations", "covid_location"], + "alias": "covid_location", + "checksum": { + "name": "sha256", + "checksum": "fe836b041345c97719cccf953b7cfedc2e006c539ece5b6eb7e4b390df632733" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_covid_observations", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "covid data location information", + "columns": { + "iso3166_1": { + "name": "iso3166_1", + "description": "ISO 3166-1 code for the country", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "county": { + "name": "county", + "description": "Name of the county", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "location_id": { + "name": "location_id", + "description": "Unique identifier for the location", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "state": { + "name": "state", + "description": "Name of the state", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "country": { + "name": "country", + "description": "Name of the country", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "iso3166_2": { + "name": "iso3166_2", + "description": "ISO 3166-2 code for the country", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "lat": { + "name": "lat", + "description": "Latitude coordinate", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "long": { + "name": "long", + "description": "Longitude coordinate", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, 
"node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/covid_observations/covid_location.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_covid_observations", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.494126, + "relation_name": "BALBOA_DEV.gomezn.covid_location", + "raw_code": "{{ config(materialized=\"view\") }}\n\nwith jhu_covid_19 as (\n select distinct\n country_region,\n coalesce(province_state, 'UNDEFINED') as province_state,\n coalesce(county, 'UNDEFINED') as county,\n lat,\n long,\n iso3166_1,\n iso3166_2,\n date\n from {{ ref(\"jhu_covid_19\") }}\n),\n\nrank_locations as (\n select\n hash(\n country_region || '|' || province_state || '|' || county\n ) as snowflake_location_id,\n {{\n dbt_utils.generate_surrogate_key(\n [\"country_region\", \"province_state\", \"county\"]\n )\n }} as location_id,\n country_region as country,\n province_state as state,\n county,\n lat,\n long,\n iso3166_1,\n iso3166_2,\n rank() over (partition by location_id order by date desc) as rowrank\n from jhu_covid_19\n)\n\nselect\n location_id,\n country,\n state,\n county,\n lat,\n long,\n iso3166_1,\n iso3166_2\nfrom rank_locations\nwhere rowrank = 1", + "language": "sql", + "refs": [ + { "name": "jhu_covid_19", "package": null, "version": null }, + { "name": "jhu_covid_19", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": [ + "macro.balboa.ref", + "macro.dbt_utils.generate_surrogate_key" + ], + "nodes": ["model.balboa.jhu_covid_19"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/covid_observations/covid_location.sql", + "compiled": true, + "compiled_code": "\n\nwith jhu_covid_19 as (\n select distinct\n country_region,\n coalesce(province_state, 'UNDEFINED') as province_state,\n coalesce(county, 'UNDEFINED') as county,\n lat,\n long,\n iso3166_1,\n iso3166_2,\n date\n from BALBOA_DEV.gomezn.jhu_covid_19\n),\n\nrank_locations as (\n select\n hash(\n country_region || '|' || province_state || '|' || county\n ) as snowflake_location_id,\n md5(cast(coalesce(cast(country_region as TEXT), '_dbt_utils_surrogate_key_null_') || '-' || coalesce(cast(province_state as TEXT), '_dbt_utils_surrogate_key_null_') || '-' || coalesce(cast(county as TEXT), '_dbt_utils_surrogate_key_null_') as TEXT)) as location_id,\n country_region as country,\n province_state as state,\n county,\n lat,\n long,\n iso3166_1,\n iso3166_2,\n rank() over (partition by location_id order by date desc) as rowrank\n from jhu_covid_19\n)\n\nselect\n location_id,\n country,\n state,\n county,\n lat,\n long,\n iso3166_1,\n iso3166_2\nfrom rank_locations\nwhere rowrank = 1", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.base_cases": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "base_cases", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/covid_observations/base_cases.sql", + "original_file_path": 
"models/L2_bays/covid_observations/base_cases.sql", + "unique_id": "model.balboa.base_cases", + "fqn": ["balboa", "L2_bays", "covid_observations", "base_cases"], + "alias": "base_cases", + "checksum": { + "name": "sha256", + "checksum": "7ad318a08a5fea540341f2d22460a1af366aa67ce7c225fe15b291c16825f6b8" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_covid_observations", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains reported COVID-19 cases", + "columns": { + "cases": { + "name": "cases", + "description": "The number of reported COVID-19 cases for a given location and date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "date": { + "name": "date", + "description": "The date when the COVID-19 cases were reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "fips": { + "name": "fips", + "description": "Federal Information Processing Standards (FIPS) code for the county where the data was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "country_region": { + "name": "country_region", + "description": "The name of the country or region where the COVID-19 cases were reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "province_state": { + "name": "province_state", + "description": "The name of the province or state where the COVID-19 cases were reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "county": { + "name": "county", + "description": "The name of the county where the COVID-19 cases were reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "case_type": { + "name": "case_type", + "description": "The type of COVID-19 case (confirmed, deaths, recovered)", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "long": { + "name": "long", + "description": "The longitude coordinate of the location where the COVID-19 cases were reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "lat": { + "name": "lat", + "description": "The latitude coordinate of the location where the COVID-19 cases were reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "iso3166_1": { + "name": "iso3166_1", + "description": "The ISO 3166-1 alpha-2 code for the country where the COVID-19 cases were reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "iso3166_2": { + "name": "iso3166_2", + "description": 
"The ISO 3166-2 code for the country where the COVID-19 cases were reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "difference": { + "name": "difference", + "description": "The difference in case numbers from the previous day's data", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "last_updated_date": { + "name": "last_updated_date", + "description": "The date when the data was last updated", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "last_reported_flag": { + "name": "last_reported_flag", + "description": "A flag indicating whether the data is the most recently reported for a given location and date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "new_cases": { + "name": "new_cases", + "description": "The number of new COVID-19 cases reported for a given location and date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/covid_observations/base_cases.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_covid_observations", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.4918149, + "relation_name": "BALBOA_DEV.gomezn.base_cases", + "raw_code": "with final as (\n\n select\n *,\n difference as new_cases\n from {{ ref('jhu_covid_19') }}\n\n)\n\nselect * from final", + "language": "sql", + "refs": [ + { "name": "jhu_covid_19", "package": null, "version": null }, + { "name": "jhu_covid_19", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": ["model.balboa.jhu_covid_19"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/covid_observations/base_cases.sql", + "compiled": true, + "compiled_code": "with final as (\n\n select\n *,\n difference as new_cases\n from BALBOA_DEV.gomezn.jhu_covid_19\n\n)\n\nselect * from final", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.total_covid_cases": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "total_covid_cases", + "resource_type": "model", + "package_name": "balboa", + "path": "L2_bays/covid_observations/total_covid_cases.sql", + "original_file_path": "models/L2_bays/covid_observations/total_covid_cases.sql", + "unique_id": "model.balboa.total_covid_cases", + "fqn": ["balboa", "L2_bays", "covid_observations", "total_covid_cases"], + "alias": "total_covid_cases", + "checksum": { + "name": "sha256", + "checksum": "721b4a82203a30ecf57c6d746a5e629fbeb405984907822c8f0ec35eccdd7f9b" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l2_covid_observations", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + 
"incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#000899" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains cleaned-up COVID-19 cases data from John Hopkins University", + "columns": { + "location_id": { + "name": "location_id", + "description": "An ID that represents a location where the COVID-19 cases were reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "date": { + "name": "date", + "description": "The date when the COVID-19 cases were reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "confirmed": { + "name": "confirmed", + "description": "The number of confirmed COVID-19 cases for a given location and date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "deaths": { + "name": "deaths", + "description": "The number of COVID-19 deaths for a given location and date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "active": { + "name": "active", + "description": "The number of active COVID-19 cases for a given location and date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "recovered": { + "name": "recovered", + "description": "The number of recovered COVID-19 cases for a given location and date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#000899" }, + "patch_path": "balboa://models/L2_bays/covid_observations/total_covid_cases.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l2_covid_observations", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#000899" } + }, + "created_at": 1705588677.497196, + "relation_name": "BALBOA_DEV.gomezn.total_covid_cases", + "raw_code": "with raw_cases as (\n select\n country_region,\n province_state,\n county,\n new_cases,\n date,\n case_type\n from {{ ref('base_cases') }}\n),\n\ncreate_location_id as (\n select\n {{ dbt_utils.generate_surrogate_key(['country_region', 'province_state', 'county']) }} as location_id, --noqa\n new_cases,\n date,\n case_type\n from raw_cases\n),\n\npivoted_model as (\n select\n location_id,\n date,\n sum(\"'Confirmed'\") as confirmed,\n sum(\"'Deaths'\") as deaths,\n sum(\"'Active'\") as active,\n sum(\"'Recovered'\") as recovered\n from create_location_id\n pivot (sum(new_cases) for case_type in ('Confirmed', 'Deaths', 'Active', 
'Recovered')) as case_pivot\n group by location_id, date\n)\n\nselect *\nfrom pivoted_model", + "language": "sql", + "refs": [ + { "name": "base_cases", "package": null, "version": null }, + { "name": "base_cases", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": [ + "macro.balboa.ref", + "macro.dbt_utils.generate_surrogate_key" + ], + "nodes": ["model.balboa.base_cases"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/covid_observations/total_covid_cases.sql", + "compiled": true, + "compiled_code": "with raw_cases as (\n select\n country_region,\n province_state,\n county,\n new_cases,\n date,\n case_type\n from BALBOA_DEV.gomezn.base_cases\n),\n\ncreate_location_id as (\n select\n md5(cast(coalesce(cast(country_region as TEXT), '_dbt_utils_surrogate_key_null_') || '-' || coalesce(cast(province_state as TEXT), '_dbt_utils_surrogate_key_null_') || '-' || coalesce(cast(county as TEXT), '_dbt_utils_surrogate_key_null_') as TEXT)) as location_id, --noqa\n new_cases,\n date,\n case_type\n from raw_cases\n),\n\npivoted_model as (\n select\n location_id,\n date,\n sum(\"'Confirmed'\") as confirmed,\n sum(\"'Deaths'\") as deaths,\n sum(\"'Active'\") as active,\n sum(\"'Recovered'\") as recovered\n from create_location_id\n pivot (sum(new_cases) for case_type in ('Confirmed', 'Deaths', 'Active', 'Recovered')) as case_pivot\n group by location_id, date\n)\n\nselect *\nfrom pivoted_model", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.personal_loans": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "personal_loans", + "resource_type": "model", + "package_name": "balboa", + "path": "L1_inlets/loans/personal_loans.sql", + "original_file_path": "models/L1_inlets/loans/personal_loans.sql", + "unique_id": "model.balboa.personal_loans", + "fqn": ["balboa", "L1_inlets", "loans", "personal_loans"], + "alias": "personal_loans", + "checksum": { + "name": "sha256", + "checksum": "da893ffb90a2fbbc9488dd7609ff969d7bf74fcb1b964a8029f2d1c44e333bbe" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l1_loans", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": null }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains data on personal loans", + "columns": { + "addr_state": { + "name": "addr_state", + "description": "The state in which the borrower resides", + "meta": { "masking_policy": "masking_policy_pii_string" }, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "annual_inc": { + "name": "annual_inc", + "description": "The borrower's annual income", 
+ "meta": { "masking_policy": "masking_policy_pii_float" }, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "collections_12_mths_ex_med": { + "name": "collections_12_mths_ex_med", + "description": "Number of collections in the last 12 months excluding medical collections", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "collection_recovery_fee": { + "name": "collection_recovery_fee", + "description": "Post charge off collection fee", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "delinq_2yrs": { + "name": "delinq_2yrs", + "description": "The number of 30+ days past-due incidences of delinquency in the borrower's credit file for the past 2 years", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "desc": { + "name": "desc", + "description": "Loan description provided by the borrower", + "meta": { "masking_policy": "masking_policy_pii_string" }, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "dti": { + "name": "dti", + "description": "The borrower's debt-to-income ratio", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "earliest_cr_line": { + "name": "earliest_cr_line", + "description": "The month the borrower's earliest reported credit line was opened", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "emp_length": { + "name": "emp_length", + "description": "Employment length in years", + "meta": { "masking_policy": "masking_policy_pii_string" }, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "emp_title": { + "name": "emp_title", + "description": "The job title supplied by the borrower when applying for the loan", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "funded_amnt": { + "name": "funded_amnt", + "description": "The total amount committed to that loan at that point in time", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "funded_amnt_inv": { + "name": "funded_amnt_inv", + "description": "The total amount committed by investors for that loan at that point in time", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "grade": { + "name": "grade", + "description": "LC assigned loan grade", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "home_ownership": { + "name": "home_ownership", + "description": "The home ownership status provided by the borrower during registration", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "initial_list_status": { + "name": "initial_list_status", + "description": "The initial listing status of the loan", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "inq_last_6mths": { + "name": "inq_last_6mths", + "description": "The number of inquiries in past 6 months (excluding auto and mortgage inquiries)", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "installment": { + "name": "installment", + "description": "The monthly payment owed by the borrower if the loan originates", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "int_rate": { + "name": "int_rate", + "description": "Interest rate on the 
loan", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "issue_d": { + "name": "issue_d", + "description": "The month which the loan was funded", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "last_credit_pull_d": { + "name": "last_credit_pull_d", + "description": "The most recent month LC pulled credit for this loan", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "last_pymnt_amnt": { + "name": "last_pymnt_amnt", + "description": "Last total payment amount received", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "last_pymnt_d": { + "name": "last_pymnt_d", + "description": "Last month payment was received", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "loan_amnt": { + "name": "loan_amnt", + "description": "The listed amount of the loan applied for by the borrower", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "loan_id": { + "name": "loan_id", + "description": "A unique identifier for the loan", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "loan_status": { + "name": "loan_status", + "description": "Current status of the loan", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "member_id": { + "name": "member_id", + "description": "A unique identifier for the borrower", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "mths_since_last_delinq": { + "name": "mths_since_last_delinq", + "description": "The number of months since the borrower's last delinquency", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "mths_since_last_major_derog": { + "name": "mths_since_last_major_derog", + "description": "Months since most recent 90-day or worse rating", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "mths_since_last_record": { + "name": "mths_since_last_record", + "description": "The number of months since the last public record", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "next_pymnt_d": { + "name": "next_pymnt_d", + "description": "Next scheduled payment date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "open_acc": { + "name": "open_acc", + "description": "The number of open credit lines in the borrower's credit file", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "out_prncp": { + "name": "out_prncp", + "description": "Remaining outstanding principal for total amount funded", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "out_prncp_inv": { + "name": "out_prncp_inv", + "description": "Remaining outstanding principal for portion of total amount funded by investors", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "policy_code": { + "name": "policy_code", + "description": "Publicly available", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "pub_rec": { + "name": "pub_rec", + "description": "Number of derogatory public records", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + 
"purpose": { + "name": "purpose", + "description": "A category provided by the borrower for the loan request", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "pymnt_plan": { + "name": "pymnt_plan", + "description": "Indicates if a payment plan has been put in place for the loan", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "recoveries": { + "name": "recoveries", + "description": "Post charge off gross recovery", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "revol_bal": { + "name": "revol_bal", + "description": "Total credit revolving balance", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "revol_util": { + "name": "revol_util", + "description": "Revolving line utilization rate, or the amount of credit the borrower is using relative to all available revolving credit", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "sub_grade": { + "name": "sub_grade", + "description": "LC assigned loan subgrade", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "term": { + "name": "term", + "description": "The number of payments on the loan. Values are in months and can be either 36 or 60", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "title": { + "name": "title", + "description": "The loan title provided by the borrower", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_acc": { + "name": "total_acc", + "description": "The total number of credit lines currently in the borrower's credit file", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_pymnt": { + "name": "total_pymnt", + "description": "Payments received to date for total amount funded", + "meta": { "masking_policy": "masking_policy_pii_float" }, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_pymnt_inv": { + "name": "total_pymnt_inv", + "description": "Payments received to date for portion of total amount funded by investors", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_rec_int": { + "name": "total_rec_int", + "description": "Interest received to date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_rec_late_fee": { + "name": "total_rec_late_fee", + "description": "Late fees received to date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_rec_prncp": { + "name": "total_rec_prncp", + "description": "Principal received to date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "url": { + "name": "url", + "description": "URL for the LC page with listing data", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "verification_status": { + "name": "verification_status", + "description": "Indicates if income was verified by LC, not verified, or if the income source was verified", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "zip_code": { + "name": "zip_code", + "description": "The first 3 numbers of the zip code provided by the borrower in the loan application", + "meta": {}, + "data_type": null, + 
"constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": "balboa://models/L1_inlets/loans/personal_loans.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l1_loans", + "persist_docs": { "relation": true, "columns": true } + }, + "created_at": 1705588677.518189, + "relation_name": "BALBOA_DEV.gomezn.personal_loans", + "raw_code": "with raw_source as (\n\n select *\n from {{ source('LOANS', 'PERSONAL_LOANS') }}\n\n),\n\nfinal as (\n\n select\n \"TOTAL_ACC\"::float as total_acc,\n \"ANNUAL_INC\"::float as annual_inc,\n \"EMP_LENGTH\"::varchar as emp_length,\n \"DESC\"::varchar as desc,\n \"TOTAL_PYMNT\"::float as total_pymnt,\n \"LAST_PYMNT_D\"::varchar as last_pymnt_d,\n \"ADDR_STATE\"::varchar as addr_state,\n \"NEXT_PYMNT_D\"::varchar as next_pymnt_d,\n \"EMP_TITLE\"::varchar as emp_title,\n \"COLLECTION_RECOVERY_FEE\"::float as collection_recovery_fee,\n \"MTHS_SINCE_LAST_MAJOR_DEROG\"::float as mths_since_last_major_derog,\n \"INQ_LAST_6MTHS\"::float as inq_last_6mths,\n \"SUB_GRADE\"::varchar as sub_grade,\n \"FUNDED_AMNT_INV\"::float as funded_amnt_inv,\n \"DELINQ_2YRS\"::float as delinq_2yrs,\n \"LOAN_ID\"::varchar as loan_id,\n \"FUNDED_AMNT\"::float as funded_amnt,\n \"VERIFICATION_STATUS\"::varchar as verification_status,\n \"DTI\"::float as dti,\n \"TOTAL_REC_PRNCP\"::float as total_rec_prncp,\n \"GRADE\"::varchar as grade,\n \"HOME_OWNERSHIP\"::varchar as home_ownership,\n \"ISSUE_D\"::varchar as issue_d,\n \"MTHS_SINCE_LAST_DELINQ\"::float as mths_since_last_delinq,\n \"OUT_PRNCP\"::float as out_prncp,\n \"PUB_REC\"::float as pub_rec,\n \"INT_RATE\"::float as int_rate,\n \"ZIP_CODE\"::varchar as zip_code,\n \"OPEN_ACC\"::float as open_acc,\n \"TERM\"::varchar as term,\n \"PYMNT_PLAN\"::varchar as pymnt_plan,\n \"URL\"::varchar as url,\n \"REVOL_BAL\"::float as revol_bal,\n \"RECOVERIES\"::float as recoveries,\n \"LAST_PYMNT_AMNT\"::float as last_pymnt_amnt,\n \"LOAN_AMNT\"::float as loan_amnt,\n \"PURPOSE\"::varchar as purpose,\n \"INITIAL_LIST_STATUS\"::varchar as initial_list_status,\n \"TOTAL_REC_INT\"::float as total_rec_int,\n \"TOTAL_PYMNT_INV\"::float as total_pymnt_inv,\n \"MTHS_SINCE_LAST_RECORD\"::float as mths_since_last_record,\n \"LAST_CREDIT_PULL_D\"::varchar as last_credit_pull_d,\n \"TOTAL_REC_LATE_FEE\"::float as total_rec_late_fee,\n \"MEMBER_ID\"::float as member_id,\n \"POLICY_CODE\"::float as policy_code,\n \"TITLE\"::varchar as title,\n \"LOAN_STATUS\"::varchar as loan_status,\n \"INSTALLMENT\"::float as installment,\n \"EARLIEST_CR_LINE\"::varchar as earliest_cr_line,\n \"REVOL_UTIL\"::varchar as revol_util,\n \"OUT_PRNCP_INV\"::float as out_prncp_inv,\n \"COLLECTIONS_12_MTHS_EX_MED\"::float as collections_12_mths_ex_med\n\n from raw_source\n\n)\n\nselect * from final", + "language": "sql", + "refs": [], + "sources": [["LOANS", "PERSONAL_LOANS"]], + "metrics": [], + "depends_on": { + "macros": [], + "nodes": ["source.balboa.LOANS.PERSONAL_LOANS"] + }, + "compiled_path": "target/compiled/balboa/models/L1_inlets/loans/personal_loans.sql", + "compiled": true, + "compiled_code": "with raw_source as (\n\n select *\n from 
RAW.LOANS.PERSONAL_LOANS\n\n),\n\nfinal as (\n\n select\n \"TOTAL_ACC\"::float as total_acc,\n \"ANNUAL_INC\"::float as annual_inc,\n \"EMP_LENGTH\"::varchar as emp_length,\n \"DESC\"::varchar as desc,\n \"TOTAL_PYMNT\"::float as total_pymnt,\n \"LAST_PYMNT_D\"::varchar as last_pymnt_d,\n \"ADDR_STATE\"::varchar as addr_state,\n \"NEXT_PYMNT_D\"::varchar as next_pymnt_d,\n \"EMP_TITLE\"::varchar as emp_title,\n \"COLLECTION_RECOVERY_FEE\"::float as collection_recovery_fee,\n \"MTHS_SINCE_LAST_MAJOR_DEROG\"::float as mths_since_last_major_derog,\n \"INQ_LAST_6MTHS\"::float as inq_last_6mths,\n \"SUB_GRADE\"::varchar as sub_grade,\n \"FUNDED_AMNT_INV\"::float as funded_amnt_inv,\n \"DELINQ_2YRS\"::float as delinq_2yrs,\n \"LOAN_ID\"::varchar as loan_id,\n \"FUNDED_AMNT\"::float as funded_amnt,\n \"VERIFICATION_STATUS\"::varchar as verification_status,\n \"DTI\"::float as dti,\n \"TOTAL_REC_PRNCP\"::float as total_rec_prncp,\n \"GRADE\"::varchar as grade,\n \"HOME_OWNERSHIP\"::varchar as home_ownership,\n \"ISSUE_D\"::varchar as issue_d,\n \"MTHS_SINCE_LAST_DELINQ\"::float as mths_since_last_delinq,\n \"OUT_PRNCP\"::float as out_prncp,\n \"PUB_REC\"::float as pub_rec,\n \"INT_RATE\"::float as int_rate,\n \"ZIP_CODE\"::varchar as zip_code,\n \"OPEN_ACC\"::float as open_acc,\n \"TERM\"::varchar as term,\n \"PYMNT_PLAN\"::varchar as pymnt_plan,\n \"URL\"::varchar as url,\n \"REVOL_BAL\"::float as revol_bal,\n \"RECOVERIES\"::float as recoveries,\n \"LAST_PYMNT_AMNT\"::float as last_pymnt_amnt,\n \"LOAN_AMNT\"::float as loan_amnt,\n \"PURPOSE\"::varchar as purpose,\n \"INITIAL_LIST_STATUS\"::varchar as initial_list_status,\n \"TOTAL_REC_INT\"::float as total_rec_int,\n \"TOTAL_PYMNT_INV\"::float as total_pymnt_inv,\n \"MTHS_SINCE_LAST_RECORD\"::float as mths_since_last_record,\n \"LAST_CREDIT_PULL_D\"::varchar as last_credit_pull_d,\n \"TOTAL_REC_LATE_FEE\"::float as total_rec_late_fee,\n \"MEMBER_ID\"::float as member_id,\n \"POLICY_CODE\"::float as policy_code,\n \"TITLE\"::varchar as title,\n \"LOAN_STATUS\"::varchar as loan_status,\n \"INSTALLMENT\"::float as installment,\n \"EARLIEST_CR_LINE\"::varchar as earliest_cr_line,\n \"REVOL_UTIL\"::varchar as revol_util,\n \"OUT_PRNCP_INV\"::float as out_prncp_inv,\n \"COLLECTIONS_12_MTHS_EX_MED\"::float as collections_12_mths_ex_med\n\n from raw_source\n\n)\n\nselect * from final", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.jhu_covid_19": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "jhu_covid_19", + "resource_type": "model", + "package_name": "balboa", + "path": "L1_inlets/covid19_epidemiological_data/jhu_covid_19.sql", + "original_file_path": "models/L1_inlets/covid19_epidemiological_data/jhu_covid_19.sql", + "unique_id": "model.balboa.jhu_covid_19", + "fqn": [ + "balboa", + "L1_inlets", + "covid19_epidemiological_data", + "jhu_covid_19" + ], + "alias": "jhu_covid_19", + "checksum": { + "name": "sha256", + "checksum": "4e02a97e86b096743a2ac838b489d0344aa6a827ee78abfb9fb4ffe05cd72a04" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l1_covid19_epidemiological_data", + "database": null, + "tags": [], + "meta": { + "business_owner": "John Doe", + "business_analyst": "Jane Doe", + "data_steward": "Jake Doe" + }, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": 
true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": null }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains raw John Hopkins COVID19 data", + "columns": { + "country_region": { + "name": "country_region", + "description": "The name of the country or region where the data was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "province_state": { + "name": "province_state", + "description": "The name of the province or state where the data was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "county": { + "name": "county", + "description": "The name of the county where the data was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "fips": { + "name": "fips", + "description": "Federal Information Processing Standards (FIPS) code for the county where the data was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "date": { + "name": "date", + "description": "The date when the data was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "case_type": { + "name": "case_type", + "description": "The type of COVID-19 case (confirmed, deaths, recovered)", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "cases": { + "name": "cases", + "description": "The number of COVID-19 cases for a given location and date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "long": { + "name": "long", + "description": "The longitude coordinate of the location where the data was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "lat": { + "name": "lat", + "description": "The latitude coordinate of the location where the data was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "iso3166_1": { + "name": "iso3166_1", + "description": "The ISO 3166-1 alpha-2 code for the country where the data was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "iso3166_2": { + "name": "iso3166_2", + "description": "The ISO 3166-2 code for the country where the data was collected", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "difference": { + "name": "difference", + "description": "The difference in case numbers from the previous day's data", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "last_updated_date": { + "name": "last_updated_date", + "description": "The date when the data was last updated", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "last_reported_flag": { + "name": 
"last_reported_flag", + "description": "A flag indicating whether the data is the most recently reported for a given location and date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": { + "business_owner": "John Doe", + "business_analyst": "Jane Doe", + "data_steward": "Jake Doe" + }, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": "balboa://models/L1_inlets/covid19_epidemiological_data/jhu_covid_19.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l1_covid19_epidemiological_data", + "persist_docs": { "relation": true, "columns": true }, + "meta": { + "business_owner": "John Doe", + "business_analyst": "Jane Doe", + "data_steward": "Jake Doe" + } + }, + "created_at": 1705588677.5242949, + "relation_name": "BALBOA_DEV.gomezn.jhu_covid_19", + "raw_code": "with raw_source as (\n select * from {{ source(\"covid19_epidemiological_data\", \"jhu_covid_19\") }}\n),\n\nfinal as (\n\n select\n \"COUNTRY_REGION\" as country_region,\n \"PROVINCE_STATE\" as province_state,\n \"COUNTY\" as county,\n \"FIPS\" as fips,\n \"DATE\" as date,\n \"CASE_TYPE\" as case_type,\n \"CASES\" as cases,\n \"LONG\" as long,\n \"LAT\" as lat,\n \"ISO3166_1\" as iso3166_1,\n \"ISO3166_2\" as iso3166_2,\n \"DIFFERENCE\" as difference,\n \"LAST_UPDATED_DATE\" as last_updated_date,\n \"LAST_REPORTED_FLAG\" as last_reported_flag\n from raw_source\n\n)\n\nselect *\nfrom final", + "language": "sql", + "refs": [], + "sources": [["covid19_epidemiological_data", "jhu_covid_19"]], + "metrics": [], + "depends_on": { + "macros": [], + "nodes": ["source.balboa.covid19_epidemiological_data.jhu_covid_19"] + }, + "compiled_path": "target/compiled/balboa/models/L1_inlets/covid19_epidemiological_data/jhu_covid_19.sql", + "compiled": true, + "compiled_code": "with raw_source as (\n select * from covid19_epidemiological_data.public.JHU_COVID_19\n),\n\nfinal as (\n\n select\n \"COUNTRY_REGION\" as country_region,\n \"PROVINCE_STATE\" as province_state,\n \"COUNTY\" as county,\n \"FIPS\" as fips,\n \"DATE\" as date,\n \"CASE_TYPE\" as case_type,\n \"CASES\" as cases,\n \"LONG\" as long,\n \"LAT\" as lat,\n \"ISO3166_1\" as iso3166_1,\n \"ISO3166_2\" as iso3166_2,\n \"DIFFERENCE\" as difference,\n \"LAST_UPDATED_DATE\" as last_updated_date,\n \"LAST_REPORTED_FLAG\" as last_reported_flag\n from raw_source\n\n)\n\nselect *\nfrom final", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.query_history": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "query_history", + "resource_type": "model", + "package_name": "balboa", + "path": "L1_inlets/account_usage/query_history.sql", + "original_file_path": "models/L1_inlets/account_usage/query_history.sql", + "unique_id": "model.balboa.query_history", + "fqn": ["balboa", "L1_inlets", "account_usage", "query_history"], + "alias": "query_history", + "checksum": { + "name": "sha256", + "checksum": "a44747285fa93059342f517c7ce18935b59b94ab7c7de2c3bdc8954eec1b1fc6" + }, + 
"config": { + "enabled": true, + "alias": null, + "schema": "l1_account_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": null }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains raw data on the history of executed queries.", + "columns": { + "query_id": { + "name": "query_id", + "description": "A unique identifier assigned to the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "query_text": { + "name": "query_text", + "description": "The SQL text of the executed query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "database_id": { + "name": "database_id", + "description": "The unique identifier of the database where the query was executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "database_name": { + "name": "database_name", + "description": "The name of the database where the query was executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "schema_id": { + "name": "schema_id", + "description": "The unique identifier of the schema where the query was executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "schema_name": { + "name": "schema_name", + "description": "The name of the schema where the query was executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "query_type": { + "name": "query_type", + "description": "The type of query executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "session_id": { + "name": "session_id", + "description": "A unique identifier for the session in which the query was executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "user_name": { + "name": "user_name", + "description": "The name of the user who executed the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "role_name": { + "name": "role_name", + "description": "The name of the role associated with the user who executed the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_id": { + "name": "warehouse_id", + "description": "The unique identifier of the warehouse where the query was executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_name": { + "name": "warehouse_name", + "description": "The name of the warehouse where the query was executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_size": { + "name": "warehouse_size", + 
"description": "The size of the warehouse where the query was executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_type": { + "name": "warehouse_type", + "description": "The type of warehouse where the query was executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "cluster_number": { + "name": "cluster_number", + "description": "The number of the cluster where the query was executed", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "query_tag": { + "name": "query_tag", + "description": "The tag assigned to the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "execution_status": { + "name": "execution_status", + "description": "The status of the query execution", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "error_code": { + "name": "error_code", + "description": "The error code if the query execution resulted in an error", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "error_message": { + "name": "error_message", + "description": "The error message if the query execution resulted in an error", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "start_time": { + "name": "start_time", + "description": "The start time of the query execution", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "end_time": { + "name": "end_time", + "description": "The end time of the query execution", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_elapsed_time": { + "name": "total_elapsed_time", + "description": "The total time elapsed during the query execution", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bytes_scanned": { + "name": "bytes_scanned", + "description": "The amount of data in bytes scanned by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "percentage_scanned_from_cache": { + "name": "percentage_scanned_from_cache", + "description": "The percentage of data scanned from cache during the query execution", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bytes_written": { + "name": "bytes_written", + "description": "The amount of data in bytes written by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bytes_written_to_result": { + "name": "bytes_written_to_result", + "description": "The amount of data in bytes written to the query result", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bytes_read_from_result": { + "name": "bytes_read_from_result", + "description": "The amount of data in bytes read from the query result", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "rows_produced": { + "name": "rows_produced", + "description": "The number of rows produced by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "rows_inserted": { + "name": "rows_inserted", + "description": "The number of rows inserted by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + 
"rows_updated": { + "name": "rows_updated", + "description": "The number of rows updated by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "rows_deleted": { + "name": "rows_deleted", + "description": "The number of rows deleted by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "rows_unloaded": { + "name": "rows_unloaded", + "description": "The number of rows unloaded by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bytes_deleted": { + "name": "bytes_deleted", + "description": "The amount of data in bytes deleted by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "partitions_scanned": { + "name": "partitions_scanned", + "description": "The number of partitions scanned by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "partitions_total": { + "name": "partitions_total", + "description": "The total number of partitions processed by the query", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bytes_spilled_to_local_storage": { + "name": "bytes_spilled_to_local_storage", + "description": "The amount of data in bytes spilled to local storage during query execution", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bytes_spilled_to_remote_storage": { + "name": "bytes_spilled_to_remote_storage", + "description": "The amount of data in bytes spilled to remote storage during query execution", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bytes_sent_over_the_network": { + "name": "bytes_sent_over_the_network", + "description": "The amount of data in bytes sent over the network during query execution", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "compilation_time": { + "name": "compilation_time", + "description": "The time taken for query compilation", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "execution_time": { + "name": "execution_time", + "description": "The time taken for query execution", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "queued_provisioning_time": { + "name": "queued_provisioning_time", + "description": "The time taken for warehouse provisioning", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "queued_repair_time": { + "name": "queued_repair_time", + "description": "The time taken for warehouse repair", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "queued_overload_time": { + "name": "queued_overload_time", + "description": "The time taken for warehouse overload", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "transaction_blocked_time": { + "name": "transaction_blocked_time", + "description": "The time taken for transaction blocking", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "outbound_data_transfer_cloud": { + "name": "outbound_data_transfer_cloud", + "description": "The cloud provider for outbound data transfer", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + 
"outbound_data_transfer_region": { + "name": "outbound_data_transfer_region", + "description": "The region for outbound data transfer", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "outbound_data_transfer_bytes": { + "name": "outbound_data_transfer_bytes", + "description": "The amount of data in bytes transferred outbound", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "inbound_data_transfer_cloud": { + "name": "inbound_data_transfer_cloud", + "description": "The cloud provider for inbound data transfer", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "inbound_data_transfer_region": { + "name": "inbound_data_transfer_region", + "description": "The region for inbound data transfer", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "inbound_data_transfer_bytes": { + "name": "inbound_data_transfer_bytes", + "description": "The amount of data in bytes transferred inbound", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "list_external_files_time": { + "name": "list_external_files_time", + "description": "The time taken to list external files", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "credits_used_cloud_services": { + "name": "credits_used_cloud_services", + "description": "The amount of credits used for cloud services", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "release_version": { + "name": "release_version", + "description": "The release version of the query engine used", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "external_function_total_invocations": { + "name": "external_function_total_invocations", + "description": "The total number of invocations of external functions", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "external_function_total_sent_rows": { + "name": "external_function_total_sent_rows", + "description": "The total number of rows sent to external functions", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "external_function_total_received_rows": { + "name": "external_function_total_received_rows", + "description": "The total number of rows received from external functions", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "external_function_total_sent_bytes": { + "name": "external_function_total_sent_bytes", + "description": "The amount of data in bytes sent to external functions", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "external_function_total_received_bytes": { + "name": "external_function_total_received_bytes", + "description": "The amount of data in bytes received from external functions", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "query_load_percent": { + "name": "query_load_percent", + "description": "The percentage of query load on the system", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "is_client_generated_statement": { + "name": "is_client_generated_statement", + "description": "Whether the statement is a client-generated statement or not", + "meta": {}, + "data_type": null, + "constraints": [], + 
"quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": "balboa://models/L1_inlets/account_usage/query_history.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l1_account_usage", + "persist_docs": { "relation": true, "columns": true } + }, + "created_at": 1705588677.542295, + "relation_name": "BALBOA_DEV.gomezn.query_history", + "raw_code": "with raw_source as (\n\n select *\n from {{ source('ACCOUNT_USAGE', 'QUERY_HISTORY') }}\n\n),\n\nfinal as (\n\n select\n \"QUERY_ID\" as query_id,\n \"QUERY_TEXT\" as query_text,\n \"DATABASE_ID\" as database_id,\n \"DATABASE_NAME\" as database_name,\n \"SCHEMA_ID\" as schema_id,\n \"SCHEMA_NAME\" as schema_name,\n \"QUERY_TYPE\" as query_type,\n \"SESSION_ID\" as session_id,\n \"USER_NAME\" as user_name,\n \"ROLE_NAME\" as role_name,\n \"WAREHOUSE_ID\" as warehouse_id,\n \"WAREHOUSE_NAME\" as warehouse_name,\n \"WAREHOUSE_SIZE\" as warehouse_size,\n \"WAREHOUSE_TYPE\" as warehouse_type,\n \"CLUSTER_NUMBER\" as cluster_number,\n \"QUERY_TAG\" as query_tag,\n \"EXECUTION_STATUS\" as execution_status,\n \"ERROR_CODE\" as error_code,\n \"ERROR_MESSAGE\" as error_message,\n \"START_TIME\" as start_time,\n \"END_TIME\" as end_time,\n \"TOTAL_ELAPSED_TIME\" as total_elapsed_time,\n \"BYTES_SCANNED\" as bytes_scanned,\n \"PERCENTAGE_SCANNED_FROM_CACHE\" as percentage_scanned_from_cache,\n \"BYTES_WRITTEN\" as bytes_written,\n \"BYTES_WRITTEN_TO_RESULT\" as bytes_written_to_result,\n \"BYTES_READ_FROM_RESULT\" as bytes_read_from_result,\n \"ROWS_PRODUCED\" as rows_produced,\n \"ROWS_INSERTED\" as rows_inserted,\n \"ROWS_UPDATED\" as rows_updated,\n \"ROWS_DELETED\" as rows_deleted,\n \"ROWS_UNLOADED\" as rows_unloaded,\n \"BYTES_DELETED\" as bytes_deleted,\n \"PARTITIONS_SCANNED\" as partitions_scanned,\n \"PARTITIONS_TOTAL\" as partitions_total,\n \"BYTES_SPILLED_TO_LOCAL_STORAGE\" as bytes_spilled_to_local_storage,\n \"BYTES_SPILLED_TO_REMOTE_STORAGE\" as bytes_spilled_to_remote_storage,\n \"BYTES_SENT_OVER_THE_NETWORK\" as bytes_sent_over_the_network,\n \"COMPILATION_TIME\" as compilation_time,\n \"EXECUTION_TIME\" as execution_time,\n \"QUEUED_PROVISIONING_TIME\" as queued_provisioning_time,\n \"QUEUED_REPAIR_TIME\" as queued_repair_time,\n \"QUEUED_OVERLOAD_TIME\" as queued_overload_time,\n \"TRANSACTION_BLOCKED_TIME\" as transaction_blocked_time,\n \"OUTBOUND_DATA_TRANSFER_CLOUD\" as outbound_data_transfer_cloud,\n \"OUTBOUND_DATA_TRANSFER_REGION\" as outbound_data_transfer_region,\n \"OUTBOUND_DATA_TRANSFER_BYTES\" as outbound_data_transfer_bytes,\n \"INBOUND_DATA_TRANSFER_CLOUD\" as inbound_data_transfer_cloud,\n \"INBOUND_DATA_TRANSFER_REGION\" as inbound_data_transfer_region,\n \"INBOUND_DATA_TRANSFER_BYTES\" as inbound_data_transfer_bytes,\n \"LIST_EXTERNAL_FILES_TIME\" as list_external_files_time,\n \"CREDITS_USED_CLOUD_SERVICES\" as credits_used_cloud_services,\n \"RELEASE_VERSION\" as release_version,\n \"EXTERNAL_FUNCTION_TOTAL_INVOCATIONS\" as external_function_total_invocations,\n \"EXTERNAL_FUNCTION_TOTAL_SENT_ROWS\" as external_function_total_sent_rows,\n \"EXTERNAL_FUNCTION_TOTAL_RECEIVED_ROWS\" as 
external_function_total_received_rows,\n \"EXTERNAL_FUNCTION_TOTAL_SENT_BYTES\" as external_function_total_sent_bytes,\n \"EXTERNAL_FUNCTION_TOTAL_RECEIVED_BYTES\" as external_function_total_received_bytes,\n \"QUERY_LOAD_PERCENT\" as query_load_percent,\n \"IS_CLIENT_GENERATED_STATEMENT\" as is_client_generated_statement\n\n from raw_source\n\n)\n\nselect * from final", + "language": "sql", + "refs": [], + "sources": [["ACCOUNT_USAGE", "QUERY_HISTORY"]], + "metrics": [], + "depends_on": { + "macros": [], + "nodes": ["source.balboa.ACCOUNT_USAGE.QUERY_HISTORY"] + }, + "compiled_path": "target/compiled/balboa/models/L1_inlets/account_usage/query_history.sql", + "compiled": true, + "compiled_code": "with raw_source as (\n\n select *\n from SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY\n\n),\n\nfinal as (\n\n select\n \"QUERY_ID\" as query_id,\n \"QUERY_TEXT\" as query_text,\n \"DATABASE_ID\" as database_id,\n \"DATABASE_NAME\" as database_name,\n \"SCHEMA_ID\" as schema_id,\n \"SCHEMA_NAME\" as schema_name,\n \"QUERY_TYPE\" as query_type,\n \"SESSION_ID\" as session_id,\n \"USER_NAME\" as user_name,\n \"ROLE_NAME\" as role_name,\n \"WAREHOUSE_ID\" as warehouse_id,\n \"WAREHOUSE_NAME\" as warehouse_name,\n \"WAREHOUSE_SIZE\" as warehouse_size,\n \"WAREHOUSE_TYPE\" as warehouse_type,\n \"CLUSTER_NUMBER\" as cluster_number,\n \"QUERY_TAG\" as query_tag,\n \"EXECUTION_STATUS\" as execution_status,\n \"ERROR_CODE\" as error_code,\n \"ERROR_MESSAGE\" as error_message,\n \"START_TIME\" as start_time,\n \"END_TIME\" as end_time,\n \"TOTAL_ELAPSED_TIME\" as total_elapsed_time,\n \"BYTES_SCANNED\" as bytes_scanned,\n \"PERCENTAGE_SCANNED_FROM_CACHE\" as percentage_scanned_from_cache,\n \"BYTES_WRITTEN\" as bytes_written,\n \"BYTES_WRITTEN_TO_RESULT\" as bytes_written_to_result,\n \"BYTES_READ_FROM_RESULT\" as bytes_read_from_result,\n \"ROWS_PRODUCED\" as rows_produced,\n \"ROWS_INSERTED\" as rows_inserted,\n \"ROWS_UPDATED\" as rows_updated,\n \"ROWS_DELETED\" as rows_deleted,\n \"ROWS_UNLOADED\" as rows_unloaded,\n \"BYTES_DELETED\" as bytes_deleted,\n \"PARTITIONS_SCANNED\" as partitions_scanned,\n \"PARTITIONS_TOTAL\" as partitions_total,\n \"BYTES_SPILLED_TO_LOCAL_STORAGE\" as bytes_spilled_to_local_storage,\n \"BYTES_SPILLED_TO_REMOTE_STORAGE\" as bytes_spilled_to_remote_storage,\n \"BYTES_SENT_OVER_THE_NETWORK\" as bytes_sent_over_the_network,\n \"COMPILATION_TIME\" as compilation_time,\n \"EXECUTION_TIME\" as execution_time,\n \"QUEUED_PROVISIONING_TIME\" as queued_provisioning_time,\n \"QUEUED_REPAIR_TIME\" as queued_repair_time,\n \"QUEUED_OVERLOAD_TIME\" as queued_overload_time,\n \"TRANSACTION_BLOCKED_TIME\" as transaction_blocked_time,\n \"OUTBOUND_DATA_TRANSFER_CLOUD\" as outbound_data_transfer_cloud,\n \"OUTBOUND_DATA_TRANSFER_REGION\" as outbound_data_transfer_region,\n \"OUTBOUND_DATA_TRANSFER_BYTES\" as outbound_data_transfer_bytes,\n \"INBOUND_DATA_TRANSFER_CLOUD\" as inbound_data_transfer_cloud,\n \"INBOUND_DATA_TRANSFER_REGION\" as inbound_data_transfer_region,\n \"INBOUND_DATA_TRANSFER_BYTES\" as inbound_data_transfer_bytes,\n \"LIST_EXTERNAL_FILES_TIME\" as list_external_files_time,\n \"CREDITS_USED_CLOUD_SERVICES\" as credits_used_cloud_services,\n \"RELEASE_VERSION\" as release_version,\n \"EXTERNAL_FUNCTION_TOTAL_INVOCATIONS\" as external_function_total_invocations,\n \"EXTERNAL_FUNCTION_TOTAL_SENT_ROWS\" as external_function_total_sent_rows,\n \"EXTERNAL_FUNCTION_TOTAL_RECEIVED_ROWS\" as external_function_total_received_rows,\n \"EXTERNAL_FUNCTION_TOTAL_SENT_BYTES\" as 
external_function_total_sent_bytes,\n \"EXTERNAL_FUNCTION_TOTAL_RECEIVED_BYTES\" as external_function_total_received_bytes,\n \"QUERY_LOAD_PERCENT\" as query_load_percent,\n \"IS_CLIENT_GENERATED_STATEMENT\" as is_client_generated_statement\n\n from raw_source\n\n)\n\nselect * from final", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.storage_usage": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "storage_usage", + "resource_type": "model", + "package_name": "balboa", + "path": "L1_inlets/account_usage/storage_usage.sql", + "original_file_path": "models/L1_inlets/account_usage/storage_usage.sql", + "unique_id": "model.balboa.storage_usage", + "fqn": ["balboa", "L1_inlets", "account_usage", "storage_usage"], + "alias": "storage_usage", + "checksum": { + "name": "sha256", + "checksum": "daf0761848152d424fa60bab194b54a59523c1c861d8690cd2fbabdfd62aa61c" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l1_account_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": null }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains raw data on storage usage.", + "columns": { + "usage_date": { + "name": "usage_date", + "description": "The date when storage usage was recorded", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "storage_bytes": { + "name": "storage_bytes", + "description": "The total amount of data in bytes stored", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "stage_bytes": { + "name": "stage_bytes", + "description": "The amount of data in bytes stored in the staging area", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "failsafe_bytes": { + "name": "failsafe_bytes", + "description": "The amount of data in bytes stored in the failsafe area", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": "balboa://models/L1_inlets/account_usage/storage_usage.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l1_account_usage", + "persist_docs": { "relation": true, "columns": true } + }, + "created_at": 1705588677.532165, 
+ "relation_name": "BALBOA_DEV.gomezn.storage_usage", + "raw_code": "with raw_source as (\n\n select *\n from {{ source('ACCOUNT_USAGE', 'STORAGE_USAGE') }}\n\n),\n\nfinal as (\n\n select\n \"USAGE_DATE\" as usage_date,\n \"STORAGE_BYTES\" as storage_bytes,\n \"STAGE_BYTES\" as stage_bytes,\n \"FAILSAFE_BYTES\" as failsafe_bytes\n\n from raw_source\n\n)\n\nselect * from final", + "language": "sql", + "refs": [], + "sources": [["ACCOUNT_USAGE", "STORAGE_USAGE"]], + "metrics": [], + "depends_on": { + "macros": [], + "nodes": ["source.balboa.ACCOUNT_USAGE.STORAGE_USAGE"] + }, + "compiled_path": "target/compiled/balboa/models/L1_inlets/account_usage/storage_usage.sql", + "compiled": true, + "compiled_code": "with raw_source as (\n\n select *\n from SNOWFLAKE.ACCOUNT_USAGE.STORAGE_USAGE\n\n),\n\nfinal as (\n\n select\n \"USAGE_DATE\" as usage_date,\n \"STORAGE_BYTES\" as storage_bytes,\n \"STAGE_BYTES\" as stage_bytes,\n \"FAILSAFE_BYTES\" as failsafe_bytes\n\n from raw_source\n\n)\n\nselect * from final", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.warehouse_metering_history": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "warehouse_metering_history", + "resource_type": "model", + "package_name": "balboa", + "path": "L1_inlets/account_usage/warehouse_metering_history.sql", + "original_file_path": "models/L1_inlets/account_usage/warehouse_metering_history.sql", + "unique_id": "model.balboa.warehouse_metering_history", + "fqn": [ + "balboa", + "L1_inlets", + "account_usage", + "warehouse_metering_history" + ], + "alias": "warehouse_metering_history", + "checksum": { + "name": "sha256", + "checksum": "d1ca1b2d3427fc2cdd93bb8d211f482d303afad805f0560d02e483703dac663f" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l1_account_usage", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": null }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains raw data on the metering history of warehouses.", + "columns": { + "start_time": { + "name": "start_time", + "description": "The start time of the warehouse metering period", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "end_time": { + "name": "end_time", + "description": "The end time of the warehouse metering period", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_id": { + "name": "warehouse_id", + "description": "The unique identifier of the warehouse", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "warehouse_name": { + "name": 
"warehouse_name", + "description": "The name of the warehouse", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "credits_used": { + "name": "credits_used", + "description": "The total number of credits used by the warehouse", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "credits_used_compute": { + "name": "credits_used_compute", + "description": "The number of credits used for compute", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "credits_used_cloud_services": { + "name": "credits_used_cloud_services", + "description": "The number of credits used for cloud services", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": "balboa://models/L1_inlets/account_usage/warehouse_metering_history.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l1_account_usage", + "persist_docs": { "relation": true, "columns": true } + }, + "created_at": 1705588677.5475168, + "relation_name": "BALBOA_DEV.gomezn.warehouse_metering_history", + "raw_code": "with raw_source as (\n\n select *\n from {{ source('ACCOUNT_USAGE', 'WAREHOUSE_METERING_HISTORY') }}\n\n),\n\nfinal as (\n\n select\n \"START_TIME\" as start_time,\n \"END_TIME\" as end_time,\n \"WAREHOUSE_ID\" as warehouse_id,\n \"WAREHOUSE_NAME\" as warehouse_name,\n \"CREDITS_USED\" as credits_used,\n \"CREDITS_USED_COMPUTE\" as credits_used_compute,\n \"CREDITS_USED_CLOUD_SERVICES\" as credits_used_cloud_services\n\n from raw_source\n\n)\n\n\nselect * from final", + "language": "sql", + "refs": [], + "sources": [["ACCOUNT_USAGE", "WAREHOUSE_METERING_HISTORY"]], + "metrics": [], + "depends_on": { + "macros": [], + "nodes": ["source.balboa.ACCOUNT_USAGE.WAREHOUSE_METERING_HISTORY"] + }, + "compiled_path": "target/compiled/balboa/models/L1_inlets/account_usage/warehouse_metering_history.sql", + "compiled": true, + "compiled_code": "with raw_source as (\n\n select *\n from SNOWFLAKE.ACCOUNT_USAGE.WAREHOUSE_METERING_HISTORY\n\n),\n\nfinal as (\n\n select\n \"START_TIME\" as start_time,\n \"END_TIME\" as end_time,\n \"WAREHOUSE_ID\" as warehouse_id,\n \"WAREHOUSE_NAME\" as warehouse_name,\n \"CREDITS_USED\" as credits_used,\n \"CREDITS_USED_COMPUTE\" as credits_used_compute,\n \"CREDITS_USED_CLOUD_SERVICES\" as credits_used_cloud_services\n\n from raw_source\n\n)\n\n\nselect * from final", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.pipe_usage_history": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "pipe_usage_history", + "resource_type": "model", + "package_name": "balboa", + "path": "L1_inlets/account_usage/pipe_usage_history.sql", + "original_file_path": "models/L1_inlets/account_usage/pipe_usage_history.sql", + "unique_id": "model.balboa.pipe_usage_history", + "fqn": ["balboa", "L1_inlets", "account_usage", "pipe_usage_history"], + 
"alias": "pipe_usage_history", + "checksum": { + "name": "sha256", + "checksum": "3541eaa3bb019212a24a23894f7edc8dd8eaac04a4bd5e2a5abfcdc6679e0111" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l1_account_usage", + "database": null, + "tags": [], + "meta": { "owner": "@alice", "model_maturity": "in dev" }, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": null }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains raw data on the usage history of pipes.", + "columns": { + "pipe_id": { + "name": "pipe_id", + "description": "A unique identifier assigned to the pipe", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "pipe_name": { + "name": "pipe_name", + "description": "The name of the pipe used for identification purposes", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "start_time": { + "name": "start_time", + "description": "The start time when the pipe was used", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "end_time": { + "name": "end_time", + "description": "The end time when the pipe was used", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "credits_used": { + "name": "credits_used", + "description": "The amount of credits used by the pipe during usage", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "bytes_inserted": { + "name": "bytes_inserted", + "description": "The amount of data in bytes inserted into the pipe during usage", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "files_inserted": { + "name": "files_inserted", + "description": "The number of files inserted into the pipe during usage", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": { "owner": "@alice", "model_maturity": "in dev" }, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": "balboa://models/L1_inlets/account_usage/pipe_usage_history.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l1_account_usage", + "persist_docs": { "relation": true, "columns": true }, + "meta": { "owner": "@alice", "model_maturity": "in dev" } + }, + "created_at": 1705588677.546748, + "relation_name": "BALBOA_DEV.gomezn.pipe_usage_history", + "raw_code": "with raw_source as (\n\n select *\n from {{ source('ACCOUNT_USAGE', 
'PIPE_USAGE_HISTORY') }}\n\n),\n\nfinal as (\n select\n \"PIPE_ID\" as pipe_id,\n \"PIPE_NAME\" as pipe_name,\n \"START_TIME\" as start_time,\n \"END_TIME\" as end_time,\n \"CREDITS_USED\" as credits_used,\n \"BYTES_INSERTED\" as bytes_inserted,\n \"FILES_INSERTED\" as files_inserted\n from raw_source\n)\n\nselect * from final", + "language": "sql", + "refs": [], + "sources": [["ACCOUNT_USAGE", "PIPE_USAGE_HISTORY"]], + "metrics": [], + "depends_on": { + "macros": [], + "nodes": ["source.balboa.ACCOUNT_USAGE.PIPE_USAGE_HISTORY"] + }, + "compiled_path": "target/compiled/balboa/models/L1_inlets/account_usage/pipe_usage_history.sql", + "compiled": true, + "compiled_code": "with raw_source as (\n\n select *\n from SNOWFLAKE.ACCOUNT_USAGE.PIPE_USAGE_HISTORY\n\n),\n\nfinal as (\n select\n \"PIPE_ID\" as pipe_id,\n \"PIPE_NAME\" as pipe_name,\n \"START_TIME\" as start_time,\n \"END_TIME\" as end_time,\n \"CREDITS_USED\" as credits_used,\n \"BYTES_INSERTED\" as bytes_inserted,\n \"FILES_INSERTED\" as files_inserted\n from raw_source\n)\n\nselect * from final", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.country_populations": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "country_populations", + "resource_type": "model", + "package_name": "balboa", + "path": "L1_inlets/country_data/country_populations.sql", + "original_file_path": "models/L1_inlets/country_data/country_populations.sql", + "unique_id": "model.balboa.country_populations", + "fqn": ["balboa", "L1_inlets", "country_data", "country_populations"], + "alias": "country_populations", + "checksum": { + "name": "sha256", + "checksum": "0a2a6900d1278002099512ad2e8fcfe65739949e3fbd42a447fcef3558c62e73" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l1_country_data", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": null }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains population information from the Github Datasets repository.", + "columns": { + "country_code": { + "name": "country_code", + "description": "The ISO 3166-1 alpha-2 code for the country", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "country_name": { + "name": "country_name", + "description": "The name of the country", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "value": { + "name": "value", + "description": "The population value for a particular year and country", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "year": { + "name": "year", + "description": "The 
year for which the population value is recorded", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": "balboa://models/L1_inlets/country_data/country_populations.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l1_country_data", + "persist_docs": { "relation": true, "columns": true } + }, + "created_at": 1705588677.548015, + "relation_name": "BALBOA_DEV.gomezn.country_populations", + "raw_code": "with raw_source as (\n\n select *\n from {{ source('RAW', 'COUNTRY_POPULATIONS') }}\n\n),\n\nfinal as (\n\n select\n year,\n \"COUNTRY NAME\" as country_name,\n value,\n \"COUNTRY CODE\" as country_code\n\n from raw_source\n\n)\n\nselect * from final\norder by country_code", + "language": "sql", + "refs": [], + "sources": [["RAW", "COUNTRY_POPULATIONS"]], + "metrics": [], + "depends_on": { + "macros": [], + "nodes": ["source.balboa.RAW.COUNTRY_POPULATIONS"] + }, + "compiled_path": "target/compiled/balboa/models/L1_inlets/country_data/country_populations.sql", + "compiled": true, + "compiled_code": "with raw_source as (\n\n select *\n from RAW.RAW.COUNTRY_POPULATIONS\n\n),\n\nfinal as (\n\n select\n year,\n \"COUNTRY NAME\" as country_name,\n value,\n \"COUNTRY CODE\" as country_code\n\n from raw_source\n\n)\n\nselect * from final\norder by country_code", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.engagement_events_report": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "engagement_events_report", + "resource_type": "model", + "package_name": "balboa", + "path": "L1_inlets/google_analytics_4/engagement_events_report.sql", + "original_file_path": "models/L1_inlets/google_analytics_4/engagement_events_report.sql", + "unique_id": "model.balboa.engagement_events_report", + "fqn": [ + "balboa", + "L1_inlets", + "google_analytics_4", + "engagement_events_report" + ], + "alias": "engagement_events_report", + "checksum": { + "name": "sha256", + "checksum": "fb8af8c1d192504e5ba53f3efdebda2fd142b5c8b0a01c2673f90a196a62032c" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l1_google_analytics_4", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "view", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": null }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + 
"tags": [], + "description": "A report of user engagement events, including the event name, count, and revenue", + "columns": { + "date": { + "name": "date", + "description": "The date on which the event occurred", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "property": { + "name": "property", + "description": "The property associated with the event", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "fivetran_id": { + "name": "fivetran_id", + "description": "The ID assigned by Fivetran to the event", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "event_name": { + "name": "event_name", + "description": "The name of the event", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_users": { + "name": "total_users", + "description": "The total number of users involved in the event", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "event_count_per_user": { + "name": "event_count_per_user", + "description": "The number of times each user participated in the event", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "event_count": { + "name": "event_count", + "description": "The overall number of times the event occurred", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_revenue": { + "name": "total_revenue", + "description": "The total revenue generated by the event", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "fivetran_synced": { + "name": "fivetran_synced", + "description": "Flag indicating whether the event data has been synced from Fivetran", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": "balboa://models/L1_inlets/google_analytics_4/engagement_events_report.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "view", + "schema": "l1_google_analytics_4", + "persist_docs": { "relation": true, "columns": true } + }, + "created_at": 1705588677.557385, + "relation_name": "BALBOA_DEV.gomezn.engagement_events_report", + "raw_code": "with raw_source as (\n\n select *\n from {{ source('GOOGLE_ANALYTICS_4', 'ENGAGEMENT_EVENTS_REPORT') }}\n\n),\n\n\nfinal as (\n\n select\n \"DATE\"::date as date,\n \"PROPERTY\"::varchar as property,\n \"_FIVETRAN_ID\"::varchar as fivetran_id,\n \"EVENT_NAME\"::varchar as event_name,\n \"TOTAL_USERS\"::number as total_users,\n \"EVENT_COUNT_PER_USER\"::float as event_count_per_user,\n \"EVENT_COUNT\"::number as event_count,\n \"TOTAL_REVENUE\"::number as total_revenue,\n \"_FIVETRAN_SYNCED\"::timestamp_tz as fivetran_synced\n\n from raw_source\n\n)\n\nselect * from final", + "language": "sql", + "refs": [], + "sources": [["GOOGLE_ANALYTICS_4", "ENGAGEMENT_EVENTS_REPORT"]], + "metrics": [], + "depends_on": { + "macros": [], + "nodes": ["source.balboa.GOOGLE_ANALYTICS_4.ENGAGEMENT_EVENTS_REPORT"] + }, + "compiled_path": 
"target/compiled/balboa/models/L1_inlets/google_analytics_4/engagement_events_report.sql", + "compiled": true, + "compiled_code": "with raw_source as (\n\n select *\n from RAW.GOOGLE_ANALYTICS_4.ENGAGEMENT_EVENTS_REPORT\n\n),\n\n\nfinal as (\n\n select\n \"DATE\"::date as date,\n \"PROPERTY\"::varchar as property,\n \"_FIVETRAN_ID\"::varchar as fivetran_id,\n \"EVENT_NAME\"::varchar as event_name,\n \"TOTAL_USERS\"::number as total_users,\n \"EVENT_COUNT_PER_USER\"::float as event_count_per_user,\n \"EVENT_COUNT\"::number as event_count,\n \"TOTAL_REVENUE\"::number as total_revenue,\n \"_FIVETRAN_SYNCED\"::timestamp_tz as fivetran_synced\n\n from raw_source\n\n)\n\nselect * from final", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.loans_by_state": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "loans_by_state", + "resource_type": "model", + "package_name": "balboa", + "path": "L3_coves/loan_analytics/loans_by_state.sql", + "original_file_path": "models/L3_coves/loan_analytics/loans_by_state.sql", + "unique_id": "model.balboa.loans_by_state", + "fqn": ["balboa", "L3_coves", "loan_analytics", "loans_by_state"], + "alias": "loans_by_state", + "checksum": { + "name": "sha256", + "checksum": "b97909029a57443c964c6cabd9180b46d3c5acb9861b6b34b4435a93e16300a0" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l3_loan_analytics", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "dynamic_table", + "incremental_strategy": null, + "persist_docs": { "relation": false }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": { "select": ["analyst"] }, + "packages": [], + "docs": { "show": true, "node_color": "#366ccf" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "snowflake_warehouse": "wh_transforming", + "target_lag": "190 days", + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "This model shows the top 10 states with loans.", + "columns": { + "state": { + "name": "state", + "description": "The state where the loans are located.", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "number_of_loans": { + "name": "number_of_loans", + "description": "The total number of loans in each state.", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#366ccf" }, + "patch_path": "balboa://models/L3_coves/loan_analytics/loans_by_state.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "dynamic_table", + "schema": 
"l3_loan_analytics", + "persist_docs": { "relation": false }, + "docs": { "node_color": "#366ccf" }, + "grants": { "select": ["analyst"] }, + "snowflake_warehouse": "wh_transforming", + "target_lag": "190 days" + }, + "created_at": 1705588677.5633972, + "relation_name": "BALBOA_DEV.gomezn.loans_by_state", + "raw_code": "{{ config(\n materialized = 'dynamic_table',\n snowflake_warehouse = 'wh_transforming',\n target_lag = '190 days',\n\n) }}\n{#\n grants:\n select: ['analyst'] #}\n{# grant select on table BALBOA.L3_LOAN_ANALYTICS.LOANS_BY_STATE to role analyst; #}\n\n\nselect\n addr_state as state,\n count(*) as number_of_loans\nfrom\n {{ source('LOANS', 'PERSONAL_LOANS') }}\ngroup by 1\norder by 2 desc\nlimit 10", + "language": "sql", + "refs": [], + "sources": [["LOANS", "PERSONAL_LOANS"]], + "metrics": [], + "depends_on": { + "macros": [], + "nodes": ["source.balboa.LOANS.PERSONAL_LOANS"] + }, + "compiled_path": "target/compiled/balboa/models/L3_coves/loan_analytics/loans_by_state.sql", + "compiled": true, + "compiled_code": "\n\n\n\n\nselect\n addr_state as state,\n count(*) as number_of_loans\nfrom\n RAW.LOANS.PERSONAL_LOANS\ngroup by 1\norder by 2 desc\nlimit 10", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.covid_cases_country": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "covid_cases_country", + "resource_type": "model", + "package_name": "balboa", + "path": "L3_coves/covid_analytics/covid_cases_country.sql", + "original_file_path": "models/L3_coves/covid_analytics/covid_cases_country.sql", + "unique_id": "model.balboa.covid_cases_country", + "fqn": ["balboa", "L3_coves", "covid_analytics", "covid_cases_country"], + "alias": "covid_cases_country", + "checksum": { + "name": "sha256", + "checksum": "6900bd9a9eb8c78fd7292bd281a446ea40f820610a7cb9ab39874ee00bdf343f" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l3_covid_analytics", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "table", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#366ccf" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains cleaned up John Hopkins COVID-19 cases by country", + "columns": { + "country": { + "name": "country", + "description": "The name of the country for which COVID-19 cases are being reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "lat": { + "name": "lat", + "description": "The latitude of the country", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "long": { + "name": "long", + "description": "The longitude of the country", + "meta": {}, + "data_type": null, + 
"constraints": [], + "quote": null, + "tags": [] + }, + "date": { + "name": "date", + "description": "The date for which COVID-19 cases are being reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "confirmed": { + "name": "confirmed", + "description": "The number of confirmed COVID-19 cases in the country on the given date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "deaths": { + "name": "deaths", + "description": "The number of COVID-19 deaths in the country on the given date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "active": { + "name": "active", + "description": "The number of active COVID-19 cases in the country on the given date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "recovered": { + "name": "recovered", + "description": "The number of recovered COVID-19 cases in the country on the given date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#366ccf" }, + "patch_path": "balboa://models/L3_coves/covid_analytics/covid_cases_country.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "table", + "schema": "l3_covid_analytics", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#366ccf" } + }, + "created_at": 1705588677.564275, + "relation_name": "BALBOA_DEV.gomezn.covid_cases_country", + "raw_code": "with covid_cases as (\n select\n location_id,\n date,\n confirmed,\n deaths,\n active,\n recovered\n from {{ ref('total_covid_cases') }}\n),\n\nlocation as (\n select\n location_id,\n state,\n country,\n lat,\n long\n from {{ ref('covid_location') }}\n)\n\nselect\n location.country,\n location.lat,\n location.long,\n covid_cases.date,\n covid_cases.confirmed,\n covid_cases.deaths,\n covid_cases.active,\n covid_cases.recovered\nfrom covid_cases\njoin location\n on covid_cases.location_id = location.location_id\nwhere\n location.country is not null\n and location.state is null", + "language": "sql", + "refs": [ + { "name": "total_covid_cases", "package": null, "version": null }, + { "name": "total_covid_cases", "package": null, "version": null }, + { "name": "covid_location", "package": null, "version": null }, + { "name": "covid_location", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": [ + "model.balboa.total_covid_cases", + "model.balboa.covid_location" + ] + }, + "compiled_path": "target/compiled/balboa/models/L3_coves/covid_analytics/covid_cases_country.sql", + "compiled": true, + "compiled_code": "with covid_cases as (\n select\n location_id,\n date,\n confirmed,\n deaths,\n active,\n recovered\n from BALBOA_DEV.gomezn.total_covid_cases\n),\n\nlocation as (\n select\n location_id,\n state,\n country,\n lat,\n long\n from BALBOA_DEV.gomezn.covid_location\n)\n\nselect\n location.country,\n location.lat,\n location.long,\n covid_cases.date,\n covid_cases.confirmed,\n covid_cases.deaths,\n covid_cases.active,\n 
covid_cases.recovered\nfrom covid_cases\njoin location\n on covid_cases.location_id = location.location_id\nwhere\n location.country is not null\n and location.state is null", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "model.balboa.covid_cases_state": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "covid_cases_state", + "resource_type": "model", + "package_name": "balboa", + "path": "L3_coves/covid_analytics/covid_cases_state.sql", + "original_file_path": "models/L3_coves/covid_analytics/covid_cases_state.sql", + "unique_id": "model.balboa.covid_cases_state", + "fqn": ["balboa", "L3_coves", "covid_analytics", "covid_cases_state"], + "alias": "covid_cases_state", + "checksum": { + "name": "sha256", + "checksum": "ad4f27ddd6cd3204189e675adfda69f0b47334fc5ac81c4565b04f1cba5836c8" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "l3_covid_analytics", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "table", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#366ccf" }, + "contract": { "enforced": false }, + "transient": "true", + "copy_grants": true, + "post-hook": [ + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + }, + { + "sql": "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}", + "transaction": true, + "index": null + } + ], + "pre-hook": [] + }, + "tags": [], + "description": "Contains COVID-19 cases by state", + "columns": { + "location_id": { + "name": "location_id", + "description": "The unique identifier for the state", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "country": { + "name": "country", + "description": "The name of the country where the state is located", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "state": { + "name": "state", + "description": "The name of the state for which COVID-19 cases are being reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "lat": { + "name": "lat", + "description": "The latitude of the state", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "long": { + "name": "long", + "description": "The longitude of the state", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "date": { + "name": "date", + "description": "The date for which COVID-19 cases are being reported", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "cases": { + "name": "cases", + "description": "The number of confirmed COVID-19 cases in the state on the given date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "deaths": { + "name": "deaths", + "description": "The number of COVID-19 deaths in the state on the given date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "active": { + 
"name": "active", + "description": "The number of active COVID-19 cases in the state on the given date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "recovered": { + "name": "recovered", + "description": "The number of recovered COVID-19 cases in the state on the given date", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#366ccf" }, + "patch_path": "balboa://models/L3_coves/covid_analytics/covid_cases_state.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "post-hook": [ + "{{ dbt_snow_mask.apply_masking_policy('sources') if target.name == 'prd_pii' }}", + "{{ dbt_snow_mask.apply_masking_policy('models') if target.name == 'prd_pii' }}" + ], + "transient": "{{ 'false' if target.name == 'prd' else 'true' }}", + "copy_grants": true, + "materialized": "table", + "schema": "l3_covid_analytics", + "persist_docs": { "relation": true, "columns": true }, + "docs": { "node_color": "#366ccf" } + }, + "created_at": 1705588677.565061, + "relation_name": "BALBOA_DEV.gomezn.covid_cases_state", + "raw_code": "with covid_cases as (\n select * from {{ ref('total_covid_cases') }}\n),\n\nlocation as (\n select * from {{ ref('covid_location') }}\n)\n\nselect\n location.location_id,\n location.country,\n location.state,\n location.lat,\n location.long,\n covid_cases.date,\n covid_cases.confirmed as cases,\n covid_cases.deaths,\n covid_cases.active,\n covid_cases.recovered\nfrom covid_cases\ninner join location\n on covid_cases.location_id = location.location_id\nwhere\n location.state is not null\n and location.county is not null", + "language": "sql", + "refs": [ + { "name": "total_covid_cases", "package": null, "version": null }, + { "name": "total_covid_cases", "package": null, "version": null }, + { "name": "covid_location", "package": null, "version": null }, + { "name": "covid_location", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": ["macro.balboa.ref"], + "nodes": [ + "model.balboa.total_covid_cases", + "model.balboa.covid_location" + ] + }, + "compiled_path": "target/compiled/balboa/models/L3_coves/covid_analytics/covid_cases_state.sql", + "compiled": true, + "compiled_code": "with covid_cases as (\n select * from BALBOA_DEV.gomezn.total_covid_cases\n),\n\nlocation as (\n select * from BALBOA_DEV.gomezn.covid_location\n)\n\nselect\n location.location_id,\n location.country,\n location.state,\n location.lat,\n location.long,\n covid_cases.date,\n covid_cases.confirmed as cases,\n covid_cases.deaths,\n covid_cases.active,\n covid_cases.recovered\nfrom covid_cases\ninner join location\n on covid_cases.location_id = location.location_id\nwhere\n location.state is not null\n and location.county is not null", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "access": "protected", + "constraints": [], + "version": null, + "latest_version": null, + "deprecation_date": null + }, + "snapshot.balboa.snp_jhu_dashboard_covid_19_global": { + "database": "raw", + "schema": "snapshots", + "name": "snp_jhu_dashboard_covid_19_global", + "resource_type": "snapshot", + "package_name": "balboa", + "path": "inlets/starschema_covid19/snp_jhu_dashboard_covid_19_global.sql", + "original_file_path": "snapshots/inlets/starschema_covid19/snp_jhu_dashboard_covid_19_global.sql", + "unique_id": 
"snapshot.balboa.snp_jhu_dashboard_covid_19_global", + "fqn": [ + "balboa", + "inlets", + "starschema_covid19", + "snp_jhu_dashboard_covid_19_global", + "snp_jhu_dashboard_covid_19_global" + ], + "alias": "snp_jhu_dashboard_covid_19_global", + "checksum": { + "name": "sha256", + "checksum": "7a27e73eb2433798cb747e6324c7269b00b14fd12cac7ad74cef7e04fca4b1e4" + }, + "config": { + "enabled": true, + "alias": null, + "schema": null, + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "snapshot", + "incremental_strategy": null, + "persist_docs": {}, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": "ifnull(country_region,'') || '|' || ifnull(province_state,'') || '|' || ifnull(county,'') || '|' || to_varchar(date) || last_update_date", + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#0f703d" }, + "contract": { "enforced": false }, + "strategy": "timestamp", + "target_schema": "snapshots", + "target_database": "raw", + "updated_at": "last_update_date", + "check_cols": null, + "post-hook": [], + "pre-hook": [] + }, + "tags": [], + "description": "", + "columns": {}, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#0f703d" }, + "patch_path": null, + "build_path": null, + "deferred": false, + "unrendered_config": { + "docs": { "node_color": "#0f703d" }, + "target_database": "raw", + "target_schema": "snapshots", + "unique_key": "ifnull(country_region,'') || '|' || ifnull(province_state,'') || '|' || ifnull(county,'') || '|' || to_varchar(date) || last_update_date", + "strategy": "timestamp", + "updated_at": "last_update_date" + }, + "created_at": 1705588677.3256462, + "relation_name": "raw.snapshots.snp_jhu_dashboard_covid_19_global", + "raw_code": "\n{{\n config(\n unique_key=\"ifnull(country_region,'') || '|' || ifnull(province_state,'') || '|' || ifnull(county,'') || '|' || to_varchar(date) || last_update_date\",\n strategy='timestamp',\n updated_at='last_update_date'\n )\n}}\n\n select distinct\n country_region,\n province_state,\n county,\n fips,\n date,\n active,\n people_tested,\n confirmed,\n people_hospitalized,\n deaths,\n recovered,\n incident_rate,\n testing_rate,\n hospitalization_rate,\n long,\n lat,\n iso3166_1,\n iso3166_2,\n last_update_date\n from\n {{ source('covid19_epidemiological_data', 'jhu_dashboard_covid_19_global') }}\n", + "language": "sql", + "refs": [], + "sources": [ + ["covid19_epidemiological_data", "jhu_dashboard_covid_19_global"] + ], + "metrics": [], + "depends_on": { + "macros": [], + "nodes": [ + "source.balboa.covid19_epidemiological_data.jhu_dashboard_covid_19_global" + ] + }, + "compiled_path": null, + "compiled": true, + "compiled_code": "\n\n\n select distinct\n country_region,\n province_state,\n county,\n fips,\n date,\n active,\n people_tested,\n confirmed,\n people_hospitalized,\n deaths,\n recovered,\n incident_rate,\n testing_rate,\n hospitalization_rate,\n long,\n lat,\n iso3166_1,\n iso3166_2,\n last_update_date\n from\n covid19_epidemiological_data.public.JHU_DASHBOARD_COVID_19_GLOBAL", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null } + }, + "seed.balboa.state_codes": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "state_codes", + "resource_type": "seed", + "package_name": "balboa", + "path": "state_codes.csv", + "original_file_path": "seeds/state_codes.csv", + "unique_id": "seed.balboa.state_codes", + 
"fqn": ["balboa", "state_codes"], + "alias": "state_codes", + "checksum": { + "name": "sha256", + "checksum": "eda0c5839ecfccb47931ca28e9d2b9aab8ed46934c030d3fb33f7f77365e8792" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "seeds", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "seed", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#379965" }, + "contract": { "enforced": false }, + "quote_columns": false, + "post-hook": [], + "pre-hook": [] + }, + "tags": [], + "description": "Lookup table used to convert between state abbreviations and state names", + "columns": { + "state_name": { + "name": "state_name", + "description": "Name of the state", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "state_code": { + "name": "state_code", + "description": "Two letter abbreviation for the state", + "meta": {}, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#379965" }, + "patch_path": "balboa://seeds/state_codes.yml", + "build_path": null, + "deferred": false, + "unrendered_config": { + "schema": "seeds", + "docs": { "node_color": "#379965" }, + "quote_columns": false, + "persist_docs": { "relation": true, "columns": true } + }, + "created_at": 1705588677.453145, + "relation_name": "BALBOA_DEV.gomezn.state_codes", + "raw_code": "", + "root_path": "/Users/noel/code/datacoves/balboa/transform", + "depends_on": { "macros": [] } + }, + "seed.balboa.covid_cases_expected_values": { + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "covid_cases_expected_values", + "resource_type": "seed", + "package_name": "balboa", + "path": "test_values/covid_cases_expected_values.csv", + "original_file_path": "seeds/test_values/covid_cases_expected_values.csv", + "unique_id": "seed.balboa.covid_cases_expected_values", + "fqn": ["balboa", "test_values", "covid_cases_expected_values"], + "alias": "covid_cases_expected_values", + "checksum": { + "name": "sha256", + "checksum": "c7e886c3d20b009650e8a27c93b114959dcaadc2bffe43f6a2e59555b8e6d8cd" + }, + "config": { + "enabled": true, + "alias": null, + "schema": "seeds", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "seed", + "incremental_strategy": null, + "persist_docs": { "relation": true, "columns": true }, + "quoting": {}, + "column_types": {}, + "full_refresh": null, + "unique_key": null, + "on_schema_change": "ignore", + "on_configuration_change": "apply", + "grants": {}, + "packages": [], + "docs": { "show": true, "node_color": "#379965" }, + "contract": { "enforced": false }, + "quote_columns": false, + "post-hook": [], + "pre-hook": [] + }, + "tags": [], + "description": "", + "columns": {}, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": "#379965" }, + "patch_path": null, + "build_path": null, + "deferred": false, + "unrendered_config": { + "schema": "seeds", + "docs": { "node_color": "#379965" }, + "quote_columns": false, + "persist_docs": { "relation": true, "columns": true } + }, + "created_at": 1705588677.352012, + "relation_name": "BALBOA_DEV.gomezn.covid_cases_expected_values", + "raw_code": "", + "root_path": 
"/Users/noel/code/datacoves/balboa/transform", + "depends_on": { "macros": [] } + }, + "test.balboa.not_null_current_population_country_code.fc48086c4b": { + "test_metadata": { + "name": "not_null", + "kwargs": { + "column_name": "country_code", + "model": "{{ get_where_subquery(ref('current_population')) }}" + }, + "namespace": null + }, + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "not_null_current_population_country_code", + "resource_type": "test", + "package_name": "balboa", + "path": "not_null_current_population_country_code.sql", + "original_file_path": "models/L2_bays/country_demographics/current_population.yml", + "unique_id": "test.balboa.not_null_current_population_country_code.fc48086c4b", + "fqn": [ + "balboa", + "L2_bays", + "country_demographics", + "not_null_current_population_country_code" + ], + "alias": "not_null_current_population_country_code", + "checksum": { "name": "none", "checksum": "" }, + "config": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "test", + "severity": "ERROR", + "store_failures": true, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + }, + "tags": [], + "description": "", + "columns": {}, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "build_path": null, + "deferred": false, + "unrendered_config": { "store_failures": true }, + "created_at": 1705588677.4865322, + "relation_name": "BALBOA_DEV.gomezn.not_null_current_population_country_code", + "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", + "language": "sql", + "refs": [ + { "name": "current_population", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": [ + "macro.dbt.test_not_null", + "macro.balboa.ref", + "macro.dbt.get_where_subquery" + ], + "nodes": ["model.balboa.current_population"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/country_demographics/current_population.yml/not_null_current_population_country_code.sql", + "compiled": true, + "compiled_code": "\n \n \n\n\n\nselect *\nfrom BALBOA_DEV.gomezn.current_population\nwhere country_code is null\n\n\n", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "column_name": "country_code", + "file_key_name": "models.current_population", + "attached_node": "model.balboa.current_population" + }, + "test.balboa.unique_current_population_country_code.010d1ff45e": { + "test_metadata": { + "name": "unique", + "kwargs": { + "column_name": "country_code", + "model": "{{ get_where_subquery(ref('current_population')) }}" + }, + "namespace": null + }, + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "unique_current_population_country_code", + "resource_type": "test", + "package_name": "balboa", + "path": "unique_current_population_country_code.sql", + "original_file_path": "models/L2_bays/country_demographics/current_population.yml", + "unique_id": "test.balboa.unique_current_population_country_code.010d1ff45e", + "fqn": [ + "balboa", + "L2_bays", + "country_demographics", + "unique_current_population_country_code" + ], + "alias": "unique_current_population_country_code", + "checksum": { "name": "none", "checksum": "" }, + "config": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "group": null, + 
"materialized": "test", + "severity": "ERROR", + "store_failures": true, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + }, + "tags": [], + "description": "", + "columns": {}, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "build_path": null, + "deferred": false, + "unrendered_config": { "store_failures": true }, + "created_at": 1705588677.48787, + "relation_name": "BALBOA_DEV.gomezn.unique_current_population_country_code", + "raw_code": "{{ test_unique(**_dbt_generic_test_kwargs) }}", + "language": "sql", + "refs": [ + { "name": "current_population", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": [ + "macro.dbt.test_unique", + "macro.balboa.ref", + "macro.dbt.get_where_subquery" + ], + "nodes": ["model.balboa.current_population"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/country_demographics/current_population.yml/unique_current_population_country_code.sql", + "compiled": true, + "compiled_code": "\n \n \n\nselect\n country_code as unique_field,\n count(*) as n_records\n\nfrom BALBOA_DEV.gomezn.current_population\nwhere country_code is not null\ngroup by country_code\nhaving count(*) > 1\n\n\n", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "column_name": "country_code", + "file_key_name": "models.current_population", + "attached_node": "model.balboa.current_population" + }, + "test.balboa.not_null_base_cases_cases.a7292b3eca": { + "test_metadata": { + "name": "not_null", + "kwargs": { + "column_name": "cases", + "model": "{{ get_where_subquery(ref('base_cases')) }}" + }, + "namespace": null + }, + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "not_null_base_cases_cases", + "resource_type": "test", + "package_name": "balboa", + "path": "not_null_base_cases_cases.sql", + "original_file_path": "models/L2_bays/covid_observations/base_cases.yml", + "unique_id": "test.balboa.not_null_base_cases_cases.a7292b3eca", + "fqn": [ + "balboa", + "L2_bays", + "covid_observations", + "not_null_base_cases_cases" + ], + "alias": "not_null_base_cases_cases", + "checksum": { "name": "none", "checksum": "" }, + "config": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "test", + "severity": "ERROR", + "store_failures": true, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + }, + "tags": [], + "description": "", + "columns": {}, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "build_path": null, + "deferred": false, + "unrendered_config": { "store_failures": true }, + "created_at": 1705588677.492555, + "relation_name": "BALBOA_DEV.gomezn.not_null_base_cases_cases", + "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", + "language": "sql", + "refs": [{ "name": "base_cases", "package": null, "version": null }], + "sources": [], + "metrics": [], + "depends_on": { + "macros": [ + "macro.dbt.test_not_null", + "macro.balboa.ref", + "macro.dbt.get_where_subquery" + ], + "nodes": ["model.balboa.base_cases"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/covid_observations/base_cases.yml/not_null_base_cases_cases.sql", + "compiled": true, + "compiled_code": "\n \n \n\n\n\nselect *\nfrom BALBOA_DEV.gomezn.base_cases\nwhere 
cases is null\n\n\n", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "column_name": "cases", + "file_key_name": "models.base_cases", + "attached_node": "model.balboa.base_cases" + }, + "test.balboa.not_null_covid_location_state.de61c768b2": { + "test_metadata": { + "name": "not_null", + "kwargs": { + "column_name": "state", + "model": "{{ get_where_subquery(ref('covid_location')) }}" + }, + "namespace": null + }, + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "not_null_covid_location_state", + "resource_type": "test", + "package_name": "balboa", + "path": "not_null_covid_location_state.sql", + "original_file_path": "models/L2_bays/covid_observations/covid_location.yml", + "unique_id": "test.balboa.not_null_covid_location_state.de61c768b2", + "fqn": [ + "balboa", + "L2_bays", + "covid_observations", + "not_null_covid_location_state" + ], + "alias": "not_null_covid_location_state", + "checksum": { "name": "none", "checksum": "" }, + "config": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "test", + "severity": "ERROR", + "store_failures": true, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + }, + "tags": [], + "description": "", + "columns": {}, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "build_path": null, + "deferred": false, + "unrendered_config": { "store_failures": true }, + "created_at": 1705588677.494693, + "relation_name": "BALBOA_DEV.gomezn.not_null_covid_location_state", + "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", + "language": "sql", + "refs": [{ "name": "covid_location", "package": null, "version": null }], + "sources": [], + "metrics": [], + "depends_on": { + "macros": [ + "macro.dbt.test_not_null", + "macro.balboa.ref", + "macro.dbt.get_where_subquery" + ], + "nodes": ["model.balboa.covid_location"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/covid_observations/covid_location.yml/not_null_covid_location_state.sql", + "compiled": true, + "compiled_code": "\n \n \n\n\n\nselect *\nfrom BALBOA_DEV.gomezn.covid_location\nwhere state is null\n\n\n", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "column_name": "state", + "file_key_name": "models.covid_location", + "attached_node": "model.balboa.covid_location" + }, + "test.balboa.not_null_covid_location_country.2d0d8f32fe": { + "test_metadata": { + "name": "not_null", + "kwargs": { + "column_name": "country", + "model": "{{ get_where_subquery(ref('covid_location')) }}" + }, + "namespace": null + }, + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "not_null_covid_location_country", + "resource_type": "test", + "package_name": "balboa", + "path": "not_null_covid_location_country.sql", + "original_file_path": "models/L2_bays/covid_observations/covid_location.yml", + "unique_id": "test.balboa.not_null_covid_location_country.2d0d8f32fe", + "fqn": [ + "balboa", + "L2_bays", + "covid_observations", + "not_null_covid_location_country" + ], + "alias": "not_null_covid_location_country", + "checksum": { "name": "none", "checksum": "" }, + "config": { + "enabled": true, + "alias": null, + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "test", + "severity": "ERROR", + 
"store_failures": true, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + }, + "tags": [], + "description": "", + "columns": {}, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "build_path": null, + "deferred": false, + "unrendered_config": { "store_failures": true }, + "created_at": 1705588677.4956439, + "relation_name": "BALBOA_DEV.gomezn.not_null_covid_location_country", + "raw_code": "{{ test_not_null(**_dbt_generic_test_kwargs) }}", + "language": "sql", + "refs": [{ "name": "covid_location", "package": null, "version": null }], + "sources": [], + "metrics": [], + "depends_on": { + "macros": [ + "macro.dbt.test_not_null", + "macro.balboa.ref", + "macro.dbt.get_where_subquery" + ], + "nodes": ["model.balboa.covid_location"] + }, + "compiled_path": "target/compiled/balboa/models/L2_bays/covid_observations/covid_location.yml/not_null_covid_location_country.sql", + "compiled": true, + "compiled_code": "\n \n \n\n\n\nselect *\nfrom BALBOA_DEV.gomezn.covid_location\nwhere country is null\n\n\n", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "column_name": "country", + "file_key_name": "models.covid_location", + "attached_node": "model.balboa.covid_location" + }, + "test.balboa.dbt_utils_unique_combination_of_columns_country_populations_country_code__year.f0f4e51143": { + "test_metadata": { + "name": "unique_combination_of_columns", + "kwargs": { + "combination_of_columns": ["country_code", "year"], + "model": "{{ get_where_subquery(ref('country_populations')) }}" + }, + "namespace": "dbt_utils" + }, + "database": "BALBOA_DEV", + "schema": "gomezn", + "name": "dbt_utils_unique_combination_of_columns_country_populations_country_code__year", + "resource_type": "test", + "package_name": "balboa", + "path": "dbt_utils_unique_combination_o_ccf7ac78693d777d133554a8bfe73c1e.sql", + "original_file_path": "models/L1_inlets/country_data/country_populations.yml", + "unique_id": "test.balboa.dbt_utils_unique_combination_of_columns_country_populations_country_code__year.f0f4e51143", + "fqn": [ + "balboa", + "L1_inlets", + "country_data", + "dbt_utils_unique_combination_of_columns_country_populations_country_code__year" + ], + "alias": "dbt_utils_unique_combination_of_columns_country_populations_country_code", + "checksum": { "name": "none", "checksum": "" }, + "config": { + "enabled": true, + "alias": "dbt_utils_unique_combination_o_ccf7ac78693d777d133554a8bfe73c1e", + "schema": "dbt_test__audit", + "database": null, + "tags": [], + "meta": {}, + "group": null, + "materialized": "test", + "severity": "ERROR", + "store_failures": true, + "where": null, + "limit": null, + "fail_calc": "count(*)", + "warn_if": "!= 0", + "error_if": "!= 0" + }, + "tags": [], + "description": "", + "columns": {}, + "meta": {}, + "group": null, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "build_path": null, + "deferred": false, + "unrendered_config": { + "store_failures": true, + "alias": "dbt_utils_unique_combination_o_ccf7ac78693d777d133554a8bfe73c1e" + }, + "created_at": 1705588677.548672, + "relation_name": "BALBOA_DEV.gomezn.dbt_utils_unique_combination_of_columns_country_populations_country_code", + "raw_code": "{{ dbt_utils.test_unique_combination_of_columns(**_dbt_generic_test_kwargs) }}{{ config(alias=\"dbt_utils_unique_combination_o_ccf7ac78693d777d133554a8bfe73c1e\") }}", + "language": "sql", + "refs": [ + { "name": 
"country_populations", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "depends_on": { + "macros": [ + "macro.dbt_utils.test_unique_combination_of_columns", + "macro.dbt.get_where_subquery", + "macro.balboa.ref" + ], + "nodes": ["model.balboa.country_populations"] + }, + "compiled_path": "target/compiled/balboa/models/L1_inlets/country_data/country_populations.yml/dbt_utils_unique_combination_o_ccf7ac78693d777d133554a8bfe73c1e.sql", + "compiled": true, + "compiled_code": "\n\n\n\n\n\nwith validation_errors as (\n\n select\n country_code, year\n from BALBOA_DEV.gomezn.country_populations\n group by country_code, year\n having count(*) > 1\n\n)\n\nselect *\nfrom validation_errors\n\n\n", + "extra_ctes_injected": true, + "extra_ctes": [], + "contract": { "enforced": false, "checksum": null }, + "column_name": null, + "file_key_name": "models.country_populations", + "attached_node": "model.balboa.country_populations" + } + }, + "sources": { + "source.balboa.LOANS.PERSONAL_LOANS": { + "database": "RAW", + "schema": "LOANS", + "name": "PERSONAL_LOANS", + "resource_type": "source", + "package_name": "balboa", + "path": "models/L1_inlets/loans/_loans.yml", + "original_file_path": "models/L1_inlets/loans/_loans.yml", + "unique_id": "source.balboa.LOANS.PERSONAL_LOANS", + "fqn": ["balboa", "L1_inlets", "loans", "LOANS", "PERSONAL_LOANS"], + "source_name": "LOANS", + "source_description": "", + "loader": "", + "identifier": "PERSONAL_LOANS", + "quoting": { + "database": null, + "schema": null, + "identifier": null, + "column": null + }, + "loaded_at_field": null, + "freshness": { + "warn_after": { "count": null, "period": null }, + "error_after": { "count": null, "period": null }, + "filter": null + }, + "external": null, + "description": "Personal Loans data", + "columns": { + "_airbyte_raw_id": { + "name": "_airbyte_raw_id", + "description": "", + "meta": { "masking_policy": "masking_policy_pii_string" }, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "_airbyte_extracted_at": { + "name": "_airbyte_extracted_at", + "description": "", + "meta": { "masking_policy": "masking_policy_pii_timestamp_tz" }, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "total_acc": { + "name": "total_acc", + "description": "", + "meta": { "masking_policy": "masking_policy_pii_float" }, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + }, + "_airbyte_meta": { + "name": "_airbyte_meta", + "description": "", + "meta": { "masking_policy": "masking_policy_pii_variant" }, + "data_type": null, + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "source_meta": {}, + "tags": ["daily_run_airbyte"], + "config": { "enabled": true }, + "patch_path": null, + "unrendered_config": {}, + "relation_name": "RAW.LOANS.PERSONAL_LOANS", + "created_at": 1705588677.5729222 + }, + "source.balboa.covid19_epidemiological_data.jhu_covid_19": { + "database": "covid19_epidemiological_data", + "schema": "public", + "name": "jhu_covid_19", + "resource_type": "source", + "package_name": "balboa", + "path": "models/L1_inlets/covid19_epidemiological_data/_covid19_epidemiological_data.yml", + "original_file_path": "models/L1_inlets/covid19_epidemiological_data/_covid19_epidemiological_data.yml", + "unique_id": "source.balboa.covid19_epidemiological_data.jhu_covid_19", + "fqn": [ + "balboa", + "L1_inlets", + "covid19_epidemiological_data", + "covid19_epidemiological_data", + "jhu_covid_19" + ], + "source_name": 
"covid19_epidemiological_data", + "source_description": "", + "loader": "", + "identifier": "JHU_COVID_19", + "quoting": { + "database": null, + "schema": null, + "identifier": null, + "column": null + }, + "loaded_at_field": null, + "freshness": { + "warn_after": { "count": null, "period": null }, + "error_after": { "count": null, "period": null }, + "filter": null + }, + "external": null, + "description": "Starschema John Hopkins COVID19 data", + "columns": {}, + "meta": {}, + "source_meta": {}, + "tags": [], + "config": { "enabled": true }, + "patch_path": null, + "unrendered_config": {}, + "relation_name": "covid19_epidemiological_data.public.JHU_COVID_19", + "created_at": 1705588677.5730538 + }, + "source.balboa.covid19_epidemiological_data.jhu_dashboard_covid_19_global": { + "database": "covid19_epidemiological_data", + "schema": "public", + "name": "jhu_dashboard_covid_19_global", + "resource_type": "source", + "package_name": "balboa", + "path": "models/L1_inlets/covid19_epidemiological_data/_covid19_epidemiological_data.yml", + "original_file_path": "models/L1_inlets/covid19_epidemiological_data/_covid19_epidemiological_data.yml", + "unique_id": "source.balboa.covid19_epidemiological_data.jhu_dashboard_covid_19_global", + "fqn": [ + "balboa", + "L1_inlets", + "covid19_epidemiological_data", + "covid19_epidemiological_data", + "jhu_dashboard_covid_19_global" + ], + "source_name": "covid19_epidemiological_data", + "source_description": "", + "loader": "", + "identifier": "JHU_DASHBOARD_COVID_19_GLOBAL", + "quoting": { + "database": null, + "schema": null, + "identifier": null, + "column": null + }, + "loaded_at_field": "last_update_date", + "freshness": { + "warn_after": { "count": 1, "period": "minute" }, + "error_after": { "count": 36, "period": "hour" }, + "filter": null + }, + "external": null, + "description": "Starschema Global COVID data", + "columns": {}, + "meta": {}, + "source_meta": {}, + "tags": [], + "config": { "enabled": true }, + "patch_path": null, + "unrendered_config": {}, + "relation_name": "covid19_epidemiological_data.public.JHU_DASHBOARD_COVID_19_GLOBAL", + "created_at": 1705588677.573142 + }, + "source.balboa.lineage.lineage_processing": { + "database": "RAW", + "schema": "RAW", + "name": "lineage_processing", + "resource_type": "source", + "package_name": "balboa", + "path": "models/L1_inlets/lineage/lineage_files.yml", + "original_file_path": "models/L1_inlets/lineage/lineage_files.yml", + "unique_id": "source.balboa.lineage.lineage_processing", + "fqn": [ + "balboa", + "L1_inlets", + "lineage", + "lineage", + "lineage_processing" + ], + "source_name": "lineage", + "source_description": "", + "loader": "S3", + "identifier": "lineage_processing", + "quoting": { + "database": null, + "schema": null, + "identifier": null, + "column": null + }, + "loaded_at_field": null, + "freshness": { + "warn_after": { "count": null, "period": null }, + "error_after": { "count": null, "period": null }, + "filter": null + }, + "external": { + "location": "@raw.raw.lineage_data", + "file_format": "( type = csv skip_header = 1 field_delimiter = ',' null_if = ('NULL', 'null') empty_field_as_null = true )", + "row_format": null, + "tbl_properties": null, + "partitions": null, + "pattern": ".*PROCESSING.*[.]csv", + "auto_refresh": false + }, + "description": "Lineage for Processing step", + "columns": { + "source": { + "name": "source", + "description": "", + "meta": {}, + "data_type": "string", + "constraints": [], + "quote": null, + "tags": [] + }, + "source_object": { + "name": 
"source_object", + "description": "", + "meta": {}, + "data_type": "string", + "constraints": [], + "quote": null, + "tags": [] + }, + "process": { + "name": "process", + "description": "", + "meta": {}, + "data_type": "string", + "constraints": [], + "quote": null, + "tags": [] + }, + "destination": { + "name": "destination", + "description": "", + "meta": {}, + "data_type": "string", + "constraints": [], + "quote": null, + "tags": [] + }, + "destination_object": { + "name": "destination_object", + "description": "", + "meta": {}, + "data_type": "string", + "constraints": [], + "quote": null, + "tags": [] + }, + "comment": { + "name": "comment", + "description": "", + "meta": {}, + "data_type": "string", + "constraints": [], + "quote": null, + "tags": [] + }, + "data_lineage_existance_check": { + "name": "data_lineage_existance_check", + "description": "", + "meta": {}, + "data_type": "string", + "constraints": [], + "quote": null, + "tags": [] + } + }, + "meta": {}, + "source_meta": {}, + "tags": [], + "config": { "enabled": true }, + "patch_path": null, + "unrendered_config": {}, + "relation_name": "RAW.RAW.lineage_processing", + "created_at": 1705588677.573243 + }, + "source.balboa.ACCOUNT_USAGE.PIPE_USAGE_HISTORY": { + "database": "SNOWFLAKE", + "schema": "ACCOUNT_USAGE", + "name": "PIPE_USAGE_HISTORY", + "resource_type": "source", + "package_name": "balboa", + "path": "models/L1_inlets/account_usage/_account_usage.yml", + "original_file_path": "models/L1_inlets/account_usage/_account_usage.yml", + "unique_id": "source.balboa.ACCOUNT_USAGE.PIPE_USAGE_HISTORY", + "fqn": [ + "balboa", + "L1_inlets", + "account_usage", + "ACCOUNT_USAGE", + "PIPE_USAGE_HISTORY" + ], + "source_name": "ACCOUNT_USAGE", + "source_description": "", + "loader": "", + "identifier": "PIPE_USAGE_HISTORY", + "quoting": { + "database": null, + "schema": null, + "identifier": null, + "column": null + }, + "loaded_at_field": null, + "freshness": { + "warn_after": { "count": null, "period": null }, + "error_after": { "count": null, "period": null }, + "filter": null + }, + "external": null, + "description": "Pipe usage history raw data", + "columns": {}, + "meta": {}, + "source_meta": {}, + "tags": [], + "config": { "enabled": true }, + "patch_path": null, + "unrendered_config": {}, + "relation_name": "SNOWFLAKE.ACCOUNT_USAGE.PIPE_USAGE_HISTORY", + "created_at": 1705588677.573324 + }, + "source.balboa.ACCOUNT_USAGE.QUERY_HISTORY": { + "database": "SNOWFLAKE", + "schema": "ACCOUNT_USAGE", + "name": "QUERY_HISTORY", + "resource_type": "source", + "package_name": "balboa", + "path": "models/L1_inlets/account_usage/_account_usage.yml", + "original_file_path": "models/L1_inlets/account_usage/_account_usage.yml", + "unique_id": "source.balboa.ACCOUNT_USAGE.QUERY_HISTORY", + "fqn": [ + "balboa", + "L1_inlets", + "account_usage", + "ACCOUNT_USAGE", + "QUERY_HISTORY" + ], + "source_name": "ACCOUNT_USAGE", + "source_description": "", + "loader": "", + "identifier": "QUERY_HISTORY", + "quoting": { + "database": null, + "schema": null, + "identifier": null, + "column": null + }, + "loaded_at_field": null, + "freshness": { + "warn_after": { "count": null, "period": null }, + "error_after": { "count": null, "period": null }, + "filter": null + }, + "external": null, + "description": "Query history source model", + "columns": {}, + "meta": {}, + "source_meta": {}, + "tags": [], + "config": { "enabled": true }, + "patch_path": null, + "unrendered_config": {}, + "relation_name": "SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY", + "created_at": 
1705588677.5734038 + }, + "source.balboa.ACCOUNT_USAGE.STORAGE_USAGE": { + "database": "SNOWFLAKE", + "schema": "ACCOUNT_USAGE", + "name": "STORAGE_USAGE", + "resource_type": "source", + "package_name": "balboa", + "path": "models/L1_inlets/account_usage/_account_usage.yml", + "original_file_path": "models/L1_inlets/account_usage/_account_usage.yml", + "unique_id": "source.balboa.ACCOUNT_USAGE.STORAGE_USAGE", + "fqn": [ + "balboa", + "L1_inlets", + "account_usage", + "ACCOUNT_USAGE", + "STORAGE_USAGE" + ], + "source_name": "ACCOUNT_USAGE", + "source_description": "", + "loader": "", + "identifier": "STORAGE_USAGE", + "quoting": { + "database": null, + "schema": null, + "identifier": null, + "column": null + }, + "loaded_at_field": null, + "freshness": { + "warn_after": { "count": null, "period": null }, + "error_after": { "count": null, "period": null }, + "filter": null + }, + "external": null, + "description": "Storage usage raw data", + "columns": {}, + "meta": {}, + "source_meta": {}, + "tags": [], + "config": { "enabled": true }, + "patch_path": null, + "unrendered_config": {}, + "relation_name": "SNOWFLAKE.ACCOUNT_USAGE.STORAGE_USAGE", + "created_at": 1705588677.573484 + }, + "source.balboa.ACCOUNT_USAGE.WAREHOUSE_METERING_HISTORY": { + "database": "SNOWFLAKE", + "schema": "ACCOUNT_USAGE", + "name": "WAREHOUSE_METERING_HISTORY", + "resource_type": "source", + "package_name": "balboa", + "path": "models/L1_inlets/account_usage/_account_usage.yml", + "original_file_path": "models/L1_inlets/account_usage/_account_usage.yml", + "unique_id": "source.balboa.ACCOUNT_USAGE.WAREHOUSE_METERING_HISTORY", + "fqn": [ + "balboa", + "L1_inlets", + "account_usage", + "ACCOUNT_USAGE", + "WAREHOUSE_METERING_HISTORY" + ], + "source_name": "ACCOUNT_USAGE", + "source_description": "", + "loader": "", + "identifier": "WAREHOUSE_METERING_HISTORY", + "quoting": { + "database": null, + "schema": null, + "identifier": null, + "column": null + }, + "loaded_at_field": null, + "freshness": { + "warn_after": { "count": null, "period": null }, + "error_after": { "count": null, "period": null }, + "filter": null + }, + "external": null, + "description": "Warehouse metering history raw data", + "columns": {}, + "meta": {}, + "source_meta": {}, + "tags": [], + "config": { "enabled": true }, + "patch_path": null, + "unrendered_config": {}, + "relation_name": "SNOWFLAKE.ACCOUNT_USAGE.WAREHOUSE_METERING_HISTORY", + "created_at": 1705588677.5735612 + }, + "source.balboa.RAW.COUNTRY_POPULATIONS": { + "database": "RAW", + "schema": "RAW", + "name": "COUNTRY_POPULATIONS", + "resource_type": "source", + "package_name": "balboa", + "path": "models/L1_inlets/country_data/_country_data.yml", + "original_file_path": "models/L1_inlets/country_data/_country_data.yml", + "unique_id": "source.balboa.RAW.COUNTRY_POPULATIONS", + "fqn": [ + "balboa", + "L1_inlets", + "country_data", + "RAW", + "COUNTRY_POPULATIONS" + ], + "source_name": "RAW", + "source_description": "", + "loader": "", + "identifier": "COUNTRY_POPULATIONS", + "quoting": { + "database": null, + "schema": null, + "identifier": null, + "column": null + }, + "loaded_at_field": null, + "freshness": { + "warn_after": { "count": null, "period": null }, + "error_after": { "count": null, "period": null }, + "filter": null + }, + "external": null, + "description": "Raw population information from Github Datasets repository", + "columns": {}, + "meta": {}, + "source_meta": {}, + "tags": ["daily_run_airbyte"], + "config": { "enabled": true }, + "patch_path": null, + 
"unrendered_config": {}, + "relation_name": "RAW.RAW.COUNTRY_POPULATIONS", + "created_at": 1705588677.573639 + }, + "source.balboa.GOOGLE_ANALYTICS_4.ENGAGEMENT_EVENTS_REPORT": { + "database": "RAW", + "schema": "GOOGLE_ANALYTICS_4", + "name": "ENGAGEMENT_EVENTS_REPORT", + "resource_type": "source", + "package_name": "balboa", + "path": "models/L1_inlets/google_analytics_4/_google_analytics_4.yml", + "original_file_path": "models/L1_inlets/google_analytics_4/_google_analytics_4.yml", + "unique_id": "source.balboa.GOOGLE_ANALYTICS_4.ENGAGEMENT_EVENTS_REPORT", + "fqn": [ + "balboa", + "L1_inlets", + "google_analytics_4", + "GOOGLE_ANALYTICS_4", + "ENGAGEMENT_EVENTS_REPORT" + ], + "source_name": "GOOGLE_ANALYTICS_4", + "source_description": "", + "loader": "", + "identifier": "ENGAGEMENT_EVENTS_REPORT", + "quoting": { + "database": null, + "schema": null, + "identifier": null, + "column": null + }, + "loaded_at_field": null, + "freshness": { + "warn_after": { "count": null, "period": null }, + "error_after": { "count": null, "period": null }, + "filter": null + }, + "external": null, + "description": "Table containing raw data on user engagement events from Google Analytics 4", + "columns": {}, + "meta": {}, + "source_meta": {}, + "tags": ["daily_run_fivetran"], + "config": { "enabled": true }, + "patch_path": null, + "unrendered_config": {}, + "relation_name": "RAW.GOOGLE_ANALYTICS_4.ENGAGEMENT_EVENTS_REPORT", + "created_at": 1705588677.573719 + } + }, + "macros": { + "macro.balboa.generate_imports": { + "name": "generate_imports", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/generate_imports.sql", + "original_file_path": "macros/generate_imports.sql", + "unique_id": "macro.balboa.generate_imports", + "macro_sql": "{% macro generate_imports(model_list) %}\n\nWITH \n{% for cte_ref in model_list %} \n{{cte_ref}} AS (\n\nSELECT * \nFROM {{ ref(cte_ref) }}\n){# Add a comma after each CTE except the last one #} {%- if not loop.last -%},{%- endif -%}\n{%- endfor -%}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "Macro to import sources for table as CTEs\nUsage: {{ generate_imports(\n [\n ( 'alias_model_1', ref('model_1') ),\n ( 'alias_model_2', ref('model_2') ),\n etc\n ]\n) }} , cte_logic as (\n any non-import CTEs that are required in this model\n)", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": "balboa://macros/generate_imports.yml", + "arguments": [], + "created_at": 1705588677.449496, + "supported_languages": null + }, + "macro.balboa.rank_desc": { + "name": "rank_desc", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/rank_desc.sql", + "original_file_path": "macros/rank_desc.sql", + "unique_id": "macro.balboa.rank_desc", + "macro_sql": "{% macro rank_desc(partition_fields, datefield) %}\n-- To be created during training\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.0490649, + "supported_languages": null + }, + "macro.balboa.reset_for_dbt_102": { + "name": "reset_for_dbt_102", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/demo/reset_dbt_102.sql", + "original_file_path": "macros/demo/reset_dbt_102.sql", + "unique_id": "macro.balboa.reset_for_dbt_102", + "macro_sql": "\n\n{%- macro reset_for_dbt_102() -%}\n {% set drop_schema_sql %}\n drop schema if exists {{ target.schema }};\n {% endset %}\n\n {% do 
run_query(drop_schema_sql) %}\n {{ log(\"Dropped Schema: \" ~ target.schema, info=true) }}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.0495648, + "supported_languages": null + }, + "macro.balboa.offload_table": { + "name": "offload_table", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/extracting/dump_data.sql", + "original_file_path": "macros/extracting/dump_data.sql", + "unique_id": "macro.balboa.offload_table", + "macro_sql": "{% macro offload_table(model_name, stage = \"DATACOVES_DB.ETL1.EXT_LINEITEM_STAGE\") %}\n\n {{ print(\"Exporting \" + model_name) }}\n\n {% set copy_sql %}\n copy into @{{ stage }}/ng_test_{{ model_name }}/data_\n from (\n select * from {{ ref(model_name) }}\n )\n header = true\n overwrite = true\n max_file_size = 1073741824;\n {% endset %}\n\n {% set results = run_query(copy_sql) %}\n\n {{ print(results.columns[0].values()) }}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.050426, + "supported_languages": null + }, + "macro.balboa.create_custom_schema": { + "name": "create_custom_schema", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/create_custom_schema.sql", + "original_file_path": "macros/tooling/create_custom_schema.sql", + "unique_id": "macro.balboa.create_custom_schema", + "macro_sql": "\n\n{%- macro create_custom_schema(db_name, schema_name) -%}\n {% set db_name = db_name | upper %}\n {% set schema_name = schema_name | upper %}\n\n {% set db_create_sql %}\n create schema if not exists {{ db_name }}.{{ schema_name }};\n {% endset %}\n {{ log(\"Creating Schema: \" ~ db_name ~ \".\" ~ schema_name, info=true) }}\n {% do run_query(db_create_sql) %}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.051072, + "supported_languages": null + }, + "macro.balboa.generate_alias_name": { + "name": "generate_alias_name", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/generate_alias_name.sql", + "original_file_path": "macros/tooling/generate_alias_name.sql", + "unique_id": "macro.balboa.generate_alias_name", + "macro_sql": "{% macro generate_alias_name(custom_alias_name=none, node=none) -%}\n\n {% if (\"__\" in node.name) and (\"dbt_packages\" not in node.root_path)%}\n {{ node.name.split(\"__\")[0] }}\n {%- else -%}\n {{ node.name }}\n {%- endif -%} \n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.051562, + "supported_languages": null + }, + "macro.balboa.ref": { + "name": "ref", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/ref.sql", + "original_file_path": "macros/tooling/ref.sql", + "unique_id": "macro.balboa.ref", + "macro_sql": "{% macro ref(modelname) %}\n {% set db_name = builtins.ref(modelname).database | lower %}}\n {% if db_name.startswith('staging') or \n db_name.endswith('staging') %}\n {{ 
return(builtins.ref(modelname).include(database=false)) }}\n {% else %}\n {{ return(builtins.ref(modelname)) }}\n {% endif %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.balboa.ref"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.052198, + "supported_languages": null + }, + "macro.balboa.empty_dev_schema": { + "name": "empty_dev_schema", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/empty_dev_schema.sql", + "original_file_path": "macros/tooling/empty_dev_schema.sql", + "unique_id": "macro.balboa.empty_dev_schema", + "macro_sql": "{% macro empty_dev_schema(dry_run=true) %}\n\n {% set query %}\n select\n schema_name,\n ref_name,\n ref_type\n from (\n select\n table_schema as schema_name,\n table_name as ref_name,\n split_part(table_type, ' ', -1) as ref_type --allows for 'BASE TABLE' rather than 'TABLE' in results\n from information_schema.tables\n where table_schema = upper('{{ target.schema }}')\n )\n {% endset %}\n {%- set result = run_query(query) -%}\n {% if result %}\n {%- for to_delete in result -%}\n {%- if dry_run -%}\n {{ log('to be dropped: ' ~ to_delete[2] ~ ' ' ~ to_delete[0] ~ '.' ~ to_delete[1], true) }}\n {%- else -%}\n {{ log('dropping ' ~ to_delete[2] ~ ' ' ~ to_delete[0] ~ '.' ~ to_delete[1], true) }}\n {% set drop_command = 'drop ' ~ to_delete[2] ~ ' if exists ' ~ to_delete[0] ~ '.' ~ to_delete[1] ~ ' cascade;' %}\n {% do run_query(drop_command) %}\n {{ log('dropped ' ~ to_delete[2] ~ ' ' ~ to_delete[0] ~ '.' ~ to_delete[1], true) }}\n {%- endif -%}\n {%- endfor -%}\n {% else %}\n {{ log('no models to clean.', true) }}\n {% endif %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.0538611, + "supported_languages": null + }, + "macro.balboa.create_database": { + "name": "create_database", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/create_database.sql", + "original_file_path": "macros/tooling/create_database.sql", + "unique_id": "macro.balboa.create_database", + "macro_sql": "\n\n{%- macro create_database() -%}\n {%- set database_exists = adapter.get_relation(\n database=target.database,\n schema=\"information_schema\",\n identifier=\"tables\") -%}\n {% if not database_exists %}\n {% set create_db_sql %}\n use role transformer_dbt;\n create database {{ target.database }};\n grant ownership on database {{ target.database }} to role {{ target.role }};\n use role {{ target.role }};\n {% endset %}\n {% do run_query(create_db_sql) %}\n {{ log(\"Created Database: \" ~ target.database, info=true) }}\n {% else %}\n {{ log(\"Database already exists: \" ~ target.database, info=true) }}\n {% endif %}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.05484, + "supported_languages": null + }, + "macro.balboa.drop_recreate_db": { + "name": "drop_recreate_db", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/drop_recreate_db.sql", + "original_file_path": "macros/tooling/drop_recreate_db.sql", + "unique_id": "macro.balboa.drop_recreate_db", + "macro_sql": "\n\n{%- macro drop_recreate_db(db_name, recreate = True) -%}\n {% set 
db_name = db_name | upper %}\n\n {% set drop_recreate_sql %}\n drop database if exists {{ db_name }};\n {% if recreate %}\n create database if not exists {{ db_name }};\n {{ log(\"Recreating Database: \" ~ db_name, info=true) }}\n {% endif %}\n {% endset %}\n \n {% do run_query(drop_recreate_sql) %}\n\n {% if recreate %}\n {% set apply_grants_sql %}\n grant usage, create schema, monitor on database {{ db_name }} to analyst;\n grant usage on database {{ db_name }} to securityadmin;\n {% endset %}\n\n {{ log(\"Applying grants on Database: \" ~ db_name, info=true) }}\n {% do run_query(apply_grants_sql) %}\n {% endif %}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.055982, + "supported_languages": null + }, + "macro.balboa.generate_schema_name": { + "name": "generate_schema_name", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/generate_schema_name.sql", + "original_file_path": "macros/tooling/generate_schema_name.sql", + "unique_id": "macro.balboa.generate_schema_name", + "macro_sql": "{% macro generate_schema_name(custom_schema_name, node) -%}\n\n {%- set default_schema = node.config.target_schema or target.schema -%}\n \n {%- if custom_schema_name is none or target.name == 'dev' -%}\n\n {{ default_schema | trim }}\n\n {%- else -%}\n \n {{ custom_schema_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.056471, + "supported_languages": null + }, + "macro.balboa.gen_yaml": { + "name": "gen_yaml", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/gen_yaml.sql", + "original_file_path": "macros/tooling/gen_yaml.sql", + "unique_id": "macro.balboa.gen_yaml", + "macro_sql": "{% macro gen_yaml(model_name) %}\n\n{% set model_yaml=[] %}\n\n{% do model_yaml.append('version: 2') %}\n{% do model_yaml.append('') %}\n{% do model_yaml.append('models:') %}\n{% do model_yaml.append(' - name: ' ~ model_name | lower) %}\n{% do model_yaml.append(' columns:') %}\n\n{% set relation=ref(model_name) %}\n{%- set columns = adapter.get_columns_in_relation(relation) -%}\n\n{% for column in columns %}\n {% do model_yaml.append(' - name: ' ~ column.name | lower ) %}\n {% do model_yaml.append('') %}\n{% endfor %}\n\n{% if execute %}\n\n {% set joined = model_yaml | join ('\\n') %}\n {{ log(joined, info=True) }}\n {% do return(joined) %}\n\n{% endif %}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.057825, + "supported_languages": null + }, + "macro.balboa.check_db_does_not_exist": { + "name": "check_db_does_not_exist", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/blue-green/check_db_does_not_exist.sql", + "original_file_path": "macros/tooling/blue-green/check_db_does_not_exist.sql", + "unique_id": "macro.balboa.check_db_does_not_exist", + "macro_sql": "{% macro check_db_does_not_exist(db_name) %}\n\n {% set results = run_query(\"show databases like '\" ~ db_name ~ \"'\") %}\n\n {% if results %}\n {{ exceptions.raise_compiler_error(\"Database exists.\") }}\n {% endif %}\n\n {{ log(db_name ~ \" does not 
exist\", true)}}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.058321, + "supported_languages": null + }, + "macro.balboa.drop_staging_db": { + "name": "drop_staging_db", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/blue-green/drop_staging_db.sql", + "original_file_path": "macros/tooling/blue-green/drop_staging_db.sql", + "unique_id": "macro.balboa.drop_staging_db", + "macro_sql": "\n\n{%- macro drop_staging_db(db_name) -%}\n {% set db_name = db_name | lower %}\n\n {# Only want this for stating with staging #}\n {% if (not db_name.startswith('staging')) and (not db_name.endswith('staging')) %}\n {{ log(\"Database is not a staging db: \" ~ db_name, true)}}\n {{ exceptions.raise_compiler_error(\"Not a staging Database\") }}\n {% else %}\n {{ drop_recreate_db(db_name = db_name, recreate = False) }}\n {{ log(\"Deteled \" + db_name, true) }}\n {% endif %} #}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.balboa.drop_recreate_db"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.0590641, + "supported_languages": null + }, + "macro.balboa.swap_database": { + "name": "swap_database", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/blue-green/swap_database.sql", + "original_file_path": "macros/tooling/blue-green/swap_database.sql", + "unique_id": "macro.balboa.swap_database", + "macro_sql": "\n\n{%- macro swap_database(db1, db2) -%}\n {% set swap_db_sql %}\n alter database {{ db1 }} swap with {{ db2 }};\n {% endset %}\n\n {% do run_query(swap_db_sql) %}\n {{ log(\"Swapped database \" ~ db1 ~ \" with \" ~ db2, info=true) }}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.059514, + "supported_languages": null + }, + "macro.balboa.grant_prd_usage": { + "name": "grant_prd_usage", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/blue-green/grant_prd_usage.sql", + "original_file_path": "macros/tooling/blue-green/grant_prd_usage.sql", + "unique_id": "macro.balboa.grant_prd_usage", + "macro_sql": "\n\n{%- macro grant_prd_usage(db_name) -%}\n\n {% set apply_db_grants_sql %}\n grant usage on database {{ db_name }} to role z_db_balboa;\n grant usage on database {{ db_name }} to role useradmin;\n {% endset %}\n {% do run_query(apply_db_grants_sql) %}\n\n {{ log(\"Applied usage grant on Database: \" ~ db_name, info=true) }}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.0599449, + "supported_languages": null + }, + "macro.balboa.create_dbt_artifacts_stage": { + "name": "create_dbt_artifacts_stage", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/blue-green/create_dbt_artifacts_stage.sql", + "original_file_path": "macros/tooling/blue-green/create_dbt_artifacts_stage.sql", + "unique_id": "macro.balboa.create_dbt_artifacts_stage", + "macro_sql": "\n\n{%- macro create_dbt_artifacts_stage(db_nmae) -%}\n{# Artifacts is used for v1 #}\n 
{% set sql %}\n create stage if not exists {{ target.database }}.source_dbt_artifacts.artifacts\n file_format = ( type = json );\n {% endset %}\n {% do run_query(sql) %}\n\n{# dbt_artifacts_stage is used for v2 #}\n {% set sql %}\n create stage if not exists {{ target.database }}.source_dbt_artifacts.dbt_artifacts_stage\n file_format = ( type = json );\n {% endset %}\n {% do run_query(sql) %}\n {{ log(\"Created DBT Artifacts Stage in database: \" + target.database, info=true) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.0605922, + "supported_languages": null + }, + "macro.balboa.clone_database": { + "name": "clone_database", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/blue-green/clone_database.sql", + "original_file_path": "macros/tooling/blue-green/clone_database.sql", + "unique_id": "macro.balboa.clone_database", + "macro_sql": "\n\n{%- macro clone_database(source_db, target_db) -%}\n {% set clone_db_sql %}\n DROP DATABASE IF EXISTS {{ target_db }};\n CREATE DATABASE {{ target_db }} CLONE {{ source_db }};\n {% endset %}\n {% do run_query(clone_db_sql) %}\n {{ log(\"Cloned Database: \" ~ target_db ~ \" from \" ~ source_db, info=true) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.0610938, + "supported_languages": null + }, + "macro.balboa.snowflake_role_comparison": { + "name": "snowflake_role_comparison", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/secure/snowflake_role_comparison.sql", + "original_file_path": "macros/tooling/secure/snowflake_role_comparison.sql", + "unique_id": "macro.balboa.snowflake_role_comparison", + "macro_sql": "{% macro snowflake_role_comparison(permifrost_role_list, dry_run = true) %}\n {{ print(\"Running as user: \" ~ target.user )}}\n {{ print(\"Running as using target: \" ~ target.name )}}\n {{ print('\\n')}}\n\n {% if(permifrost_role_list |length == 0) %}\n {{ exceptions.raise_compiler_error(\"List of roles to compare is empty\") }}\n {% else %}}\n {% set permifrost_roles = permifrost_role_list.upper().split(',') %}\n\n {% set roles_to_be_created = [] %}\n {% set roles_missing_in_permifrost = [] %}\n\n {% set roles_results = run_query(\"use role securityadmin; show roles;\") %}\n {% set roles_in_snowflake = roles_results.columns[\"name\"].values() %}\n\n {# We don't care about default snowflake schemas #}\n {% set excluded_roles = [\"ORGADMIN\",\"ACCOUNTADMIN\",\"SYSADMIN\",\"SECURITYADMIN\",\"USERADMIN\"] %}\n\n {% for role in permifrost_roles %}\n {% if (role.upper() not in excluded_roles) and role.upper() not in roles_in_snowflake %}\n {{ roles_to_be_created.append(role) }}\n {% endif %}\n {% endfor %}\n\n {% for role in roles_in_snowflake %}\n {% if role.upper() not in permifrost_roles %}\n {{ roles_missing_in_permifrost.append(role) }}\n {% endif %}\n {% endfor %}\n\n {{ print('#######################################')}}\n {{ print('####### Roles not in Permifrost #######')}}\n {{ print('#######################################')}}\n\n {{ print('\\n'.join(roles_missing_in_permifrost))}}\n {{ print('\\n') }}\n\n {% if roles_to_be_created %}\n\n {{ print('#######################################')}}\n {{ print('####### 
Roles not in Snowflake #######')}}\n {{ print('#######################################')}}\n\n {{ print('\\n'.join(roles_to_be_created))}}\n {{ print('\\n') }}\n\n {% if dry_run == true %}\n {{ print('Roles not created during a dry_run')}}\n {% else %}\n {{ create_snowflake_roles(roles_to_be_created) }}\n {% endif %}\n\n {% else %}\n\n {{ print('=======================================')}}\n {{ print('Roles in Permifrost exist in Snowflake')}}\n {{ print('=======================================\\n')}}\n\n {% endif %}\n {% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.run_query", "macro.balboa.create_snowflake_roles"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.065454, + "supported_languages": null + }, + "macro.balboa.create_snowflake_warehouses": { + "name": "create_snowflake_warehouses", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/secure/create_snowflake_warehouses.sql", + "original_file_path": "macros/tooling/secure/create_snowflake_warehouses.sql", + "unique_id": "macro.balboa.create_snowflake_warehouses", + "macro_sql": "{% macro create_snowflake_warehouses(warehouses_to_be_created) %}\n \n {% for wh in warehouses_to_be_created %} \n \n {% set create_wh_sql %}\n use role sysadmin;\n create warehouse {{ wh[\"name\"] }}\n {% if \"parameters\" in wh -%}\n with \n {% for k, v in wh[\"parameters\"].items() -%}\n {% if k == \"size\" -%}\n WAREHOUSE_SIZE=\"{{v}}\"\n {% else -%}\n {{k.upper()}}={{v}}\n {% endif -%}\n {%- endfor -%}\n {%- endif -%}\n ;\n {%- endset -%}\n\n {% do run_query(create_wh_sql) %}\n {{ print(\"Warehouse \" ~ wh[\"name\"] ~ \" created\") }}\n\n {% endfor %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.066907, + "supported_languages": null + }, + "macro.balboa.snowflake_schema_comparison": { + "name": "snowflake_schema_comparison", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/secure/snowflake_schema_comparison.sql", + "original_file_path": "macros/tooling/secure/snowflake_schema_comparison.sql", + "unique_id": "macro.balboa.snowflake_schema_comparison", + "macro_sql": "{% macro snowflake_schema_comparison(permifrost_schema_list, dry_run = true) %}\n {{ print(\"Running as user: \" ~ target.user )}}\n {{ print(\"Running as using target: \" ~ target.name )}}\n {{ print('\\n')}}\n\n {% if(permifrost_schema_list |length == 0) %}\n {{ exceptions.raise_compiler_error(\"List of schemas to compare is empty\") }}\n {% else %}}\n\n {% set permifrost_schemas = permifrost_schema_list.upper().split(',') %}\n\n {% set permifrost_databases = [] %}\n\n {% set snowflake_schemas = [] %}\n\n {% set schemas_to_be_created = [] %}\n {% set schemas_missing_in_permifrost = [] %}\n\n {# Get the databases for the schemas defined in permifrost #}\n {% for full_schema_name in permifrost_schemas %}\n {% set db = full_schema_name.split('.')[0] %}\n {% if db not in permifrost_databases %}\n {{ permifrost_databases.append(db) }}\n {% endif %}\n {% endfor %}\n\n {# Go through each database and compare snowflake to permifrost #}\n {% for schema_db in permifrost_databases %}\n {# Get schemas for this database from snowflake #}\n {% set schemas_in_snowflake_db_query %}\n use role securityadmin;\n show schemas in database {{ 
schema_db }};\n {% endset %}\n\n {% set query_results = run_query(schemas_in_snowflake_db_query) %}\n\n {% set schemas_in_snowflake_db = query_results.columns[\"name\"].values() %}\n\n {# We don't care about default snowflake schemas #}\n {% set excluded_schemas = [\"INFORMATION_SCHEMA\",\"PUBLIC\"] %}\n\n {# Go through each schema that's in snowflake and see if it exists in permifrost #}\n {% for snowflake_schema in schemas_in_snowflake_db %}\n {% set full_schema_name = schema_db + '.' + snowflake_schema.upper() %}\n\n {% if (snowflake_schema.upper() not in excluded_schemas) and full_schema_name not in permifrost_schemas %}\n {{ schemas_missing_in_permifrost.append(full_schema_name) }}\n {% endif %}\n {% endfor %}\n\n {# Go through each schema that's in permifrost and see if it exists in snowflake #}\n {% for permifrost_schema in permifrost_schemas %}\n\n {% set permifrost_schema_db = permifrost_schema.split('.')[0] %}\n {% set permifrost_schema_name = permifrost_schema.split('.')[1] %}\n\n {% if permifrost_schema_db == schema_db %}\n {% if permifrost_schema_name not in schemas_in_snowflake_db %}\n {{ schemas_to_be_created.append(permifrost_schema_db + \".\" + permifrost_schema_name) }}\n {% endif %}\n {% endif %}\n {% endfor %}\n {% endfor %}\n\n\n {{ print('#########################################')}}\n {{ print('####### Schemas not in Permifrost #######')}}\n {{ print('#########################################')}}\n\n {{ print('\\n'.join(schemas_missing_in_permifrost))}}\n {{ print('\\n') }}\n\n {% if schemas_to_be_created %}\n {{ print('########################################')}}\n {{ print('####### Schemas not in Snowflake #######')}}\n {{ print('########################################')}}\n\n {{ print('\\n'.join(schemas_to_be_created)) }}\n {{ print('\\n') }}\n\n {% if dry_run == true %}\n {{ print('Schemas not created during a dry_run')}}\n {% else %}\n {{ create_snowflake_schemas(schemas_to_be_created) }}\n {% endif %}\n\n {% else %}\n\n {{ print('=========================================')}}\n {{ print('Schemas in Permifrost exist in Snowflake')}}\n {{ print('=========================================\\n')}}\n\n {% endif %}\n {% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.run_query", + "macro.balboa.create_snowflake_schemas" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.0717921, + "supported_languages": null + }, + "macro.balboa.add_grants_to_share": { + "name": "add_grants_to_share", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/secure/add_grants_to_share.sql", + "original_file_path": "macros/tooling/secure/add_grants_to_share.sql", + "unique_id": "macro.balboa.add_grants_to_share", + "macro_sql": "{% macro add_grants_to_share(share_name) %}\n\n {% if target.name == 'prd' %}\n\n {% set grant_sql %}\n use role securityadmin;\n grant select on {{ this }} to share {{ share_name }}\n {% endset %}\n\n {% do run_query(grant_sql) %}\n\n {% endif %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.072316, + "supported_languages": null + }, + "macro.balboa.create_streamlit_app": { + "name": "create_streamlit_app", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/secure/create_streamlit_app.sql", + 
"original_file_path": "macros/tooling/secure/create_streamlit_app.sql", + "unique_id": "macro.balboa.create_streamlit_app", + "macro_sql": "\n\n\n\n{%- macro create_streamlit_app(\n app_name,\n app_main_file,\n grant_to_roles,\n app_schema=\"balboa_apps.resources\",\n app_stage=\"balboa_apps.resources.streamlit\",\n app_warehouse=\"wh_transforming\") -%}\n\n {% set fully_qualified_app_name = app_schema + '.' + app_name %}\n\n {% set create_sql %}\n create streamlit if not exists {{ fully_qualified_app_name }}\n root_location = '@{{ app_stage }}'\n main_file = '/{{ app_main_file }}'\n query_warehouse = '{{ app_warehouse }}';\n {% endset %}\n\n {% set results = run_query(create_sql) %}\n\n {{ print(results.columns[0].values() ) }}\n\n {% set grant_sql %}\n {% for role in grant_to_roles %}\n grant usage on streamlit {{ fully_qualified_app_name }} to role {{ role }};\n {% endfor %}\n {% endset %}\n\n {% set results = run_query(grant_sql) %}\n\n {{ print(results.columns[0].values() ) }}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.073545, + "supported_languages": null + }, + "macro.balboa.snowflake_warehouse_comparison": { + "name": "snowflake_warehouse_comparison", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/secure/snowflake_warehouse_comparison.sql", + "original_file_path": "macros/tooling/secure/snowflake_warehouse_comparison.sql", + "unique_id": "macro.balboa.snowflake_warehouse_comparison", + "macro_sql": "{% macro snowflake_warehouse_comparison(permifrost_warehouse_list, dry_run = true) %}\n {{ print(\"Running as user: \" ~ target.user )}}\n {{ print(\"Running as using target: \" ~ target.name )}}\n {{ print('\\n')}}\n\n {% set permifrost_warehouses = [] %}\n {% set warehouses_to_be_created = [] %}\n {% set warehouses_missing_in_permifrost = [] %}\n\n {% for wh in permifrost_warehouse_list %}\n {% for k, v in wh.items() %}\n {% if k == \"name\" %}\n {{ permifrost_warehouses.append(v.upper()) }}\n {% endif %}\n {% endfor %}\n {% endfor %}\n\n {% set warehouse_results = run_query(\"use role sysadmin; show warehouses;\") %}\n {% set warehousess_in_snowflake = warehouse_results.columns[\"name\"].values() %}\n\n {% for warehouse in permifrost_warehouse_list %}\n {% if warehouse['name'].upper() not in warehousess_in_snowflake %}\n {{ warehouses_to_be_created.append(warehouse) }}\n {% endif %}\n {% endfor %}\n\n {% for warehouse in warehousess_in_snowflake %}\n {% if warehouse.upper() not in permifrost_warehouses %}\n {{ warehouses_missing_in_permifrost.append(warehouse) }}\n {% endif %}\n {% endfor %}\n\n {{ print('############################################')}}\n {{ print('####### Warehouses not in Permifrost #######')}}\n {{ print('############################################')}}\n\n {{ print('\\n'.join(warehouses_missing_in_permifrost))}}\n {{ print('\\n') }}\n\n {% if warehouses_to_be_created %}\n\n {{ print('###########################################')}}\n {{ print('####### Warehouses not in Snowflake #######')}}\n {{ print('###########################################')}}\n\n {% for warehouse in warehouses_to_be_created %}\n {{ print(warehouse['name'].upper()) }}\n {% endfor %}\n {{ print('\\n') }}\n\n {% if dry_run == true %}\n {{ print('Warehouses not created during a dry_run')}}\n {% else %}\n {{ create_snowflake_warehouses(warehouses_to_be_created) }}\n {% endif 
%}\n\n {% else %}\n\n {{ print('===========================================')}}\n {{ print('Warehouses in Permifrost exist in Snowflake')}}\n {{ print('===========================================\\n')}}\n\n {% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.run_query", + "macro.balboa.create_snowflake_warehouses" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.0778701, + "supported_languages": null + }, + "macro.balboa.create_snowflake_roles": { + "name": "create_snowflake_roles", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/secure/create_snowflake_roles.sql", + "original_file_path": "macros/tooling/secure/create_snowflake_roles.sql", + "unique_id": "macro.balboa.create_snowflake_roles", + "macro_sql": "{% macro create_snowflake_roles(roles_to_be_created) %}\n {% for role in roles_to_be_created %} \n \n {% set create_role_sql %}\n use role securityadmin;\n create role {{ role.upper() }};\n {% endset %}\n\n {% do run_query(create_role_sql) %}\n {{ print(\"Role \"~role~\" created\") }}\n \n {% endfor %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.078413, + "supported_languages": null + }, + "macro.balboa.enable_orgadmin": { + "name": "enable_orgadmin", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/secure/enable_orgadmin.sql", + "original_file_path": "macros/tooling/secure/enable_orgadmin.sql", + "unique_id": "macro.balboa.enable_orgadmin", + "macro_sql": "\n\n{%- macro enable_orgadmin(run_from_account, enable_on_account) -%}\n\n {% if run_from_account == enable_on_account %}\n use role accountadmin;\n {% else %}\n use role orgadmin;\n {% endif %}\n alter account {{account}} set is_org_admin = true;\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.078738, + "supported_languages": null + }, + "macro.balboa.create_snowflake_schemas": { + "name": "create_snowflake_schemas", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/secure/create_snowflake_schemas.sql", + "original_file_path": "macros/tooling/secure/create_snowflake_schemas.sql", + "unique_id": "macro.balboa.create_snowflake_schemas", + "macro_sql": "{% macro create_snowflake_schemas(schemas_to_be_created) %}\n {% for schema in schemas_to_be_created %}\n\n {% set create_schema_sql %}\n use role transformer_dbt;\n create schema {{ schema.upper() }};\n {% endset %}\n\n {% do run_query(create_schema_sql) %}\n {{ log(\"Schema \" ~ schema ~ \" created\", info=true) }}\n\n {% endfor %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.079268, + "supported_languages": null + }, + "macro.balboa.create_non_permifrost_items": { + "name": "create_non_permifrost_items", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/secure/create_non_permifrost_items.sql", + "original_file_path": "macros/tooling/secure/create_non_permifrost_items.sql", + "unique_id": 
"macro.balboa.create_non_permifrost_items", + "macro_sql": "\n\n{%- macro create_non_permifrost_items(databases = [], accounts_to_share_to = [], stages = []) -%}\n\n {%- set create_objects_sql -%}\n\n use role accountadmin;\n\n grant create database on account to role transformer_dbt;\n\n {%- for account in accounts_to_share_to -%}\n alter database {{ database_name }} enable replication to {{ account }};\n {%- endfor -%}\n\n grant monitor usage on account to role z_db_snowflake;\n\n {%- for database in databases -%}\n create database if not exists {{ database }};\n grant ownership on database {{ database }} to role transformer_dbt revoke current grants;\n {%- endfor -%}\n\n grant create masking policy on schema {{ var('common_masking_policy_db') }}.{{ var('common_masking_policy_schema') }} to role transformer_dbt;\n grant apply masking policy on account to role transformer_dbt;\n\n use role securityadmin;\n grant imported privileges on database snowflake to role z_db_snowflake;\n\n {%- for stage in stages -%}\n {%- set database_name = stage.split('.')[0] -%}\n {%- set schema_name = stage.split('.')[1] -%}\n\n use role transformer_dbt;\n create schema if not exists {{ database_name }}.{{ schema_name }};\n\n create stage if not exists {{ stage }}\n directory = (enable=true)\n file_format = (type=csv field_delimiter=none record_delimiter=none);\n\n use role securityadmin;\n\n grant read on stage {{stage}} to role z_stage_{{ schema_name }}_read;\n grant read on stage {{stage}} to role z_stage_{{ schema_name }}_write;\n grant write on stage {{stage}} to role z_stage_{{ schema_name }}_write;\n\n {%- endfor -%}\n\n use role securityadmin;\n grant role transformer_dbt to user {{target.user}};\n {%- endset -%}\n\n {{ print(\"\\n\\nRunning the following SQL:\") }}\n {{ print(\"=\"*30) }}\n {{ print(create_objects_sql) }}\n {{ print(\"=\"*30) }}\n\n {%- set results = run_query(create_objects_sql) -%}\n\n {{ print(results.columns[0].values()) }}\n {{ print(\"Objects Created\") }}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.081951, + "supported_languages": null + }, + "macro.balboa.grant_access_to_pr_database": { + "name": "grant_access_to_pr_database", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/ci-cd/grant_access_to_pr_database.sql", + "original_file_path": "macros/tooling/ci-cd/grant_access_to_pr_database.sql", + "unique_id": "macro.balboa.grant_access_to_pr_database", + "macro_sql": "\n\n{%- macro grant_access_to_pr_database() -%}\n {% set db_role_name = 'z_db_balboa_tst' %}\n {% set db_name = target.database %}\n\n {% set apply_db_grants_sql %}\n grant usage on database {{ db_name }} to role {{db_role_name}};\n {% endset %}\n\n {% do run_query(apply_db_grants_sql) %}\n\n {% set schemas_list %}\n select schema_name \n from {{ db_name }}.information_schema.schemata\n where schema_name not in ('INFORMATION_SCHEMA','PUBLIC','DBT_TEST__AUDIT')\n {% endset %}\n\n {% set schemas = run_query(schemas_list) %}\n {% for schema in schemas %}\n\n {% set apply_schema_grants_sql %}\n grant usage on schema {{db_name}}.{{ schema[0] }} to z_schema_{{schema[0]}};\n grant select on all tables in schema {{db_name}}.{{ schema[0] }} to role z_tables_views_general;\n grant select on all views in schema {{db_name}}.{{ schema[0] }} to role z_tables_views_general;\n {% endset %}\n\n {% do 
run_query(apply_schema_grants_sql) %}\n {{ log(\"Applied grants on Schema: \" ~ db_name ~ '.' ~ schema[0], info=true) }}\n {% endfor %}\n\n {{ log(\"Applied grants on Database: \" ~ db_name, info=true) }}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.083725, + "supported_languages": null + }, + "macro.balboa.get_last_artifacts": { + "name": "get_last_artifacts", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/ci-cd/get_last_artifacts.sql", + "original_file_path": "macros/tooling/ci-cd/get_last_artifacts.sql", + "unique_id": "macro.balboa.get_last_artifacts", + "macro_sql": "{% macro get_last_artifacts(stage = 'RAW.DBT_ARTIFACTS.ARTIFACTS') %}\n {# we will put the manifest.json in the log directory and use the with the --state param in dbt #}\n {% set logs_dir = env_var('DATACOVES__DBT_HOME') ~ \"/logs/\" %}\n\n {# List only the .json files in the root folder (excludes archive dir) #}\n {% set list_stage_query %}\n LIST @{{ stage }} PATTERN = '^((?!(archive/)).)*.json$';\n {% endset %}\n\n {{ print(\"\\nCurrent items in stage \" ~ stage) }}\n {% set results = run_query(list_stage_query) %}\n {{ results.exclude('md5').print_table(max_column_width=40) }}\n {{ print(\"\\n\" ~ \"=\"*85) }}\n\n {% set artifacts_destination = \"file://\" + logs_dir %}\n\n {% set get_query %}\n get @{{ stage }}/manifest.json {{ artifacts_destination }};\n get @{{ stage }}/catalog.json {{ artifacts_destination }};\n {% endset %}\n\n {% set results = run_query(get_query) %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.084827, + "supported_languages": null + }, + "macro.balboa.remove_closed_pr_dbs": { + "name": "remove_closed_pr_dbs", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/ci-cd/remove_closed_pr_dbs.sql", + "original_file_path": "macros/tooling/ci-cd/remove_closed_pr_dbs.sql", + "unique_id": "macro.balboa.remove_closed_pr_dbs", + "macro_sql": "\n\n{%- macro remove_closed_pr_dbs(pr_ids) -%}\n {% if (pr_ids is not none) and ('|' in pr_ids|string) %}\n\n {% set pr_array = pr_ids.split(\"|\") %}\n\n {% for this_pr in pr_array %}\n {% set this_db = 'BALBOA_PR_' ~ this_pr %}\n \n {{ log(\"Running drop statement for database: \" ~ this_db, info=true) }}\n {% set drop_db_sql %}\n DROP DATABASE IF EXISTS {{ this_db }};\n {% endset %}\n\n {% do run_query(drop_db_sql) %}\n {% endfor %}\n\n {% else %}\n {% set this_db = 'BALBOA_PR_' ~ pr_ids %}\n\n {{ log(\"Running drop statement for database: \" ~ this_db, info=true) }}\n\n {% set drop_db_sql %}\n DROP DATABASE IF EXISTS {{ this_db }};\n {% endset %}\n\n {% do run_query(drop_db_sql) %}\n {% endif %}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.086068, + "supported_languages": null + }, + "macro.balboa.upload_artifacts": { + "name": "upload_artifacts", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/ci-cd/upload_artifacts.sql", + "original_file_path": "macros/tooling/ci-cd/upload_artifacts.sql", + "unique_id": 
"macro.balboa.upload_artifacts", + "macro_sql": "{% macro upload_artifacts(stage = 'RAW.DBT_ARTIFACTS.ARTIFACTS', artifacts = ['manifest.json', 'run_results.json', 'catalog.json'], run_as_role = 'transformer_dbt') %}\n {{ print(\"Uploading manifest for dbt version: \" ~ dbt_version) }}\n\n {% set set_date_query %}\n SELECT TO_CHAR(SYSTIMESTAMP(), 'YYYYMMDD_HH24MISS') as timestamp, TO_CHAR(SYSTIMESTAMP(), 'YYYYMM') as day_month;\n {% endset %}\n {% set results = run_query(set_date_query) %}\n\n {% set target_dir = \"target/\" %}\n\n {% set file_prefix = results.columns[0].values()[0] ~ \"_\" %}\n {% set archive_folder_name = \"archive/\" ~ results.columns[1].values()[0] ~ \"/\" %}\n\n {% set set_role_query %}\n use role {{ run_as_role }};\n {% endset %}\n {% do run_query(set_role_query) %}\n\n {# List only the .json files in the root folder (excludes archive dir) #}\n {% set list_stage_query %}\n LIST @{{ stage }} PATTERN = '^((?!(archive/)).)*.json$';\n {% endset %}\n\n {{ print(\"\\nCurrent items in stage \" ~ stage) }}\n {% set results = run_query(list_stage_query) %}\n {{ results.exclude('md5').print_table(max_column_width=40) }}\n {{ print(\"\\n\" ~ \"=\"*85) }}\n\n {% for artifact in artifacts %}\n\n {% set artifact_path = \"file://\" + target_dir ~ artifact %}\n\n {% set artifact_file_name = file_prefix ~ artifact %}\n {% set artifact_archive_path = archive_folder_name ~ artifact_file_name %}\n\n {% set put_query %}\n put {{ artifact_path }} @{{ stage }} AUTO_COMPRESS=false OVERWRITE=true;\n put {{ artifact_path }} @{{ stage }}/{{artifact_archive_path}} AUTO_COMPRESS=false OVERWRITE=true;\n {% endset %}\n\n {{ print(\"\\nSnowflake PUT query for file: \" ~ artifact )}}\n {# This is just here to format the string and remove the leading spaces #}\n {% for line in put_query.splitlines() %}\n {{ print(line.lstrip()) }}\n {% endfor %}\n\n {% set results = run_query(put_query) %}\n\n {% endfor %}\n\n {{ print(\"=\"*85) }}\n {{ print(\"\\nItems in stage \" ~ stage ~ \" after PUT\") }}\n {% set results = run_query(list_stage_query) %}\n {{ results.exclude('md5').print_table(max_column_width=40) }}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.089324, + "supported_languages": null + }, + "macro.balboa.deploy_streamlit_app": { + "name": "deploy_streamlit_app", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tooling/ci-cd/deploy_streamlit_app.sql", + "original_file_path": "macros/tooling/ci-cd/deploy_streamlit_app.sql", + "unique_id": "macro.balboa.deploy_streamlit_app", + "macro_sql": "\n\n{%- macro deploy_streamlit_app(\n app_path=\"/config/workspace/observe/streamlit/loans-example\",\n app_file=\"loans.py\",\n app_stage=\"balboa_apps.resources.streamlit\") -%}\n\n {% set create_sql %}\n PUT 'file://{{ app_path }}/{{ app_file }}'\n '@{{ app_stage }}'\n overwrite=true\n auto_compress=false;\n {% endset %}\n\n {% set results = run_query(create_sql) %}\n\n {{ print(\"Application deployed: \" + results.columns[0].values()[0] ) }}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.090249, + "supported_languages": null + }, + "macro.balboa.drop_orphaned_relations": { + "name": "drop_orphaned_relations", + "resource_type": 
"macro", + "package_name": "balboa", + "path": "macros/tooling/ci-cd/drop_orphaned_relations.sql", + "original_file_path": "macros/tooling/ci-cd/drop_orphaned_relations.sql", + "unique_id": "macro.balboa.drop_orphaned_relations", + "macro_sql": "\n\n{%- macro drop_orphaned_relations(dry_run='true') -%}\n\n {%- if execute -%}\n\n -- Create empty dictionary that will contain the hierarchy of the models in dbt\n {%- set current_model_locations = {} -%}\n\n -- Insert the hierarchy database.schema.table in the dictionary above\n {%- for node in graph.nodes.values() | selectattr(\"resource_type\", \"in\", [\"model\", \"seed\", \"snapshot\"]) -%}\n\n {%- set database_name = node.database.upper() -%}\n {%- set schema_name = node.schema.upper() -%}\n {%- set table_name = node.alias if node.alias else node.name -%}\n\n -- Add db name if it does not exist in the dict\n {%- if not database_name in current_model_locations -%}\n {% do current_model_locations.update({database_name: {}}) -%}\n {%- endif -%}\n\n -- Add schema name if it does not exist in the dict\n {%- if not schema_name in current_model_locations[database_name] -%}\n {% do current_model_locations[database_name].update({schema_name: []}) -%}\n {%- endif -%}\n\n -- Add the tables for the db and schema selected\n {%- do current_model_locations[database_name][schema_name].append(table_name.upper()) -%}\n\n {%- endfor -%}\n\n {%- endif -%}\n\n -- Query to retrieve the models to drop\n {%- set cleanup_query -%}\n\n WITH models_to_drop AS (\n {%- for database in current_model_locations.keys() -%}\n {%- if loop.index > 1 %}\n UNION ALL\n {% endif %}\n\n SELECT\n CASE\n WHEN table_type = 'BASE TABLE' THEN 'TABLE'\n WHEN table_type = 'VIEW' THEN 'VIEW'\n ELSE NULL\n END AS relation_type,\n table_catalog,\n table_schema,\n table_name,\n concat_ws('.', table_catalog, table_schema, table_name) as relation_name\n FROM {{ database }}.information_schema.tables\n WHERE\n table_schema IN ('{{ \"', '\".join(current_model_locations[database].keys()) }}')\n AND NOT (\n {%- for schema in current_model_locations[database].keys() -%}\n {% if loop.index > 1 %}\n OR {% endif %} table_schema = '{{ schema }}' AND table_name IN ('{{ \"', '\".join(current_model_locations[database][schema]) }}')\n {%- endfor %}\n )\n {%- endfor -%}\n )\n -- Create the DROP statments to be executed in the database\n SELECT 'DROP ' || relation_type || ' IF EXISTS ' || table_catalog || '.' ||table_schema || '.' 
|| table_name || ';' AS drop_commands\n FROM models_to_drop\n WHERE relation_type IS NOT NULL\n\n {%- endset -%}\n\n {{ print(cleanup_query) }}\n\n -- Execute the DROP statments above\n {%- set drop_commands = run_query(cleanup_query) -%}\n\n {%- if drop_commands -%}\n\n {%- for drop_command in drop_commands.columns[0].values() -%}\n {% if loop.first %}\n {{ print('Drop commands:') }}\n {{ print('-' * 20) }}\n {% endif %}\n\n {%- do print(drop_command) -%}\n\n {%- if not dry_run -%}\n {%- do run_query(drop_command) -%}\n {{ print('Drop command executed') }}\n {%- endif -%}\n\n {%- endfor -%}\n\n {{ print('-' * 20) }}\n\n {%- if dry_run -%}\n {{ print('Dry run; orphaned relations still in database') }}\n {%- else -%}\n {{ print('Done dropping orphaned relations from database') }}\n {%- endif -%}\n\n {%- else -%}\n\n {{ print('No orphaned relations found') }}\n\n {%- endif -%}\n\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.096336, + "supported_languages": null + }, + "macro.balboa.materialization_materialized_view_snowflake": { + "name": "materialization_materialized_view_snowflake", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/materializations/materialized_view_materialization.sql", + "original_file_path": "macros/materializations/materialized_view_materialization.sql", + "unique_id": "macro.balboa.materialization_materialized_view_snowflake", + "macro_sql": "{% materialization materialized_view, adapter='snowflake' -%}\n\n {% set original_query_tag = set_query_tag() %}\n\n {% set full_refresh_mode = (should_full_refresh()) %}\n\n {% set target_relation = this %}\n {% set existing_relation = load_relation(this) %}\n {% set tmp_relation = make_temp_relation(this) %}\n\n {{ run_hooks(pre_hooks) }}\n\n {% if (existing_relation is none or full_refresh_mode) %}\n {% set build_sql = dbt_labs_materialized_views.create_materialized_view_as(target_relation, sql, config) %}\n {% elif existing_relation.is_view or existing_relation.is_table %}\n {#-- Can't overwrite a view with a table - we must drop --#}\n {{ log(\"Dropping relation \" ~ target_relation ~ \" because it is a \" ~ existing_relation.type ~ \" and this model is a materialized view.\") }}\n {% do adapter.drop_relation(existing_relation) %}\n {% set build_sql = dbt_labs_materialized_views.create_materialized_view_as(target_relation, sql, config) %}\n {% else %}\n {# noop #}\n {% endif %}\n \n {% if build_sql %}\n {% call statement(\"main\") %}\n {{ build_sql }}\n {% endcall %}\n {% else %}\n {{ store_result('main', 'SKIP') }}\n {% endif %}\n\n {{ run_hooks(post_hooks) }}\n \n {% do persist_docs(target_relation, model) %}\n \n {% do unset_query_tag(original_query_tag) %}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.set_query_tag", + "macro.dbt.should_full_refresh", + "macro.dbt.load_relation", + "macro.dbt.make_temp_relation", + "macro.dbt.run_hooks", + "macro.dbt.statement", + "macro.dbt.persist_docs", + "macro.dbt_snowflake.unset_query_tag" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.09867, + "supported_languages": ["sql"] + }, + "macro.balboa.ingest_external_table": { + "name": "ingest_external_table", + "resource_type": 
"macro", + "package_name": "balboa", + "path": "macros/ingestion/external_stage_to_table.sql", + "original_file_path": "macros/ingestion/external_stage_to_table.sql", + "unique_id": "macro.balboa.ingest_external_table", + "macro_sql": "{% macro ingest_external_table(external_source_name) %}\n {#\n Inspired by dbt-external-tables macro\n https://github.com/dbt-labs/dbt-external-tables/blob/main/macros/plugins/snowflake/snowpipe/get_copy_sql.sql\n #}\n\n {% set target_nodes = [] %}\n {#\n Brute for loop through nodes to grab the one we want.\n This can be extended to support multiple sources, nodes, etc\n #}\n {% set source_nodes = graph.sources.values() if graph.sources else [] %}\n {% for node in source_nodes %}\n {% if node.source_name == external_source_name.strip() %}\n {{ log(\"Node acquired\") }}\n {% do target_nodes.append(node) %}\n {% endif %}\n {% endfor %}\n\n {% if target_nodes | length == 0 %}\n {{ log(\"Missing target node - please check it matches existing source\") }}\n {{ return(none) }}\n {% endif %}\n\n {{ log(\"Executing copy from stage\") }}\n {%- set columns = target_nodes[0].columns.values() -%}\n {%- set external = target_nodes[0].external -%}\n {%- set is_csv = dbt_external_tables.is_csv(external.file_format) %}\n {%- set explicit_transaction = true -%}\n {%- set copy_into_target = source(target_nodes[0].source_name, target_nodes[0].name) -%}\n\n {{ log(\"Creating table if not exists\") }}\n {%- call statement('create_table', fetch_result=True) %}\n create table if not exists {{ copy_into_target }}\n (\n value variant,\n filename varchar,\n rownumber integer,\n _dbt_copied_at timestamp\n );\n {% endcall %}\n\n {%- if explicit_transaction -%} begin; {%- endif %}\n\n {{ log(\"Executing copy from stage\") }}\n {%- call statement('copy_execution', fetch_result=True) %}\n copy into {{ copy_into_target }}\n from (\n select\n {% if columns|length == 0 %}\n $1::variant as value,\n {% else -%}\n {%- for column in columns -%}\n {%- set col_expression -%}\n {%- if is_csv -%}nullif($ {{ loop.index }},'') {# special case: get columns by ordinal position #}\n {%- else -%}nullif($1:{{ column.name }},'') {# standard behavior: get columns by name #}\n {%- endif -%}\n {%- endset -%}\n {{ col_expression }}::{{ column.data_type }} as {{ column.name }},\n {% endfor -%}\n {% endif %}\n metadata$filename::varchar as metadata_filename,\n metadata$file_row_number::bigint as metadata_file_row_number,\n current_timestamp::timestamp as _dbt_copied_at\n from {{ external.location }} {# stage #}\n )\n file_format = {{ external.file_format }}\n {% if external.pattern -%} pattern = '{{ external.pattern }}' {%- endif %}\n ;\n {% if explicit_transaction -%} commit; {%- endif -%}\n {%- endcall -%}\n\n {{ log(\"Executed copy from stage. 
Exiting macro...\") }}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_external_tables.is_csv", "macro.dbt.statement"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.102266, + "supported_languages": null + }, + "macro.balboa.create_update_masking_policy_pii": { + "name": "create_update_masking_policy_pii", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/masking_policies/create_update_masking_policy_pii.sql", + "original_file_path": "macros/masking_policies/create_update_masking_policy_pii.sql", + "unique_id": "macro.balboa.create_update_masking_policy_pii", + "macro_sql": "{% macro create_update_masking_policy_pii(\n node_database,\n node_schema,\n data_type,\n mask_value,\n is_in_use=false )\n%}\n\n {% if mask_value is string %}\n {% set mask_value = \"\\'\" + mask_value + \"\\'\"%}\n {% endif %}\n\n {% if (data_type | upper) == 'VARIANT' %}\n {% set a, b = mask_value %}\n {% set mask_value = \"OBJECT_CONSTRUCT(\\'\" + a + \"\\',\\'\" + b + \"\\')\" %}\n {{print(mask_value)}}\n {% endif %}\n\n\n {% set masking_policy_db = var(\"common_masking_policy_db\") %}\n {% set masking_policy_schema = var(\"common_masking_policy_schema\") %}\n\n {% set sql = '' %}\n\n {% set sql %}\n {% if is_in_use %}\n alter masking policy {{ masking_policy_db }}.{{ masking_policy_schema }}.masking_policy_pii_{{data_type}} set body ->\n {% else %}\n create or replace masking policy {{ masking_policy_db }}.{{ masking_policy_schema }}.masking_policy_pii_{{data_type}} as (val {{data_type}})\n returns {{data_type}} ->\n {% endif %}\n {% endset %}\n\n {% set sql %}\n {{ sql }}\n case\n when is_role_in_session('Z_POLICY_UNMASK_PII') then val\n else {{ mask_value }}\n end\n {% endset %}\n\n {{ return(sql) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.104141, + "supported_languages": null + }, + "macro.balboa.masking_policy_list": { + "name": "masking_policy_list", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/masking_policies/masking_policy_list.sql", + "original_file_path": "macros/masking_policies/masking_policy_list.sql", + "unique_id": "macro.balboa.masking_policy_list", + "macro_sql": "{% macro masking_policy_list() %}\n\n {% set masking_policy_db = var(\"common_masking_policy_db\") %}\n {% set masking_policy_schema = var(\"common_masking_policy_schema\") %}\n\n {% set sql %}\n SHOW MASKING POLICIES in {{ masking_policy_db }}.{{ masking_policy_schema }};\n {% endset %}\n\n {% set result = run_query(sql) %}\n {{ return( result.columns['name'].values() | list ) }}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.10479, + "supported_languages": null + }, + "macro.balboa.masking_policy_in_use": { + "name": "masking_policy_in_use", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/masking_policies/masking_policy_in_use.sql", + "original_file_path": "macros/masking_policies/masking_policy_in_use.sql", + "unique_id": "macro.balboa.masking_policy_in_use", + "macro_sql": "{% macro masking_policy_in_use(policy_name) %}\n\n {% set sql %}\n select POLICY_NAME\n from table( 
information_schema.policy_references( policy_name => '{{policy_name}}' ))\n group by 1;\n {% endset %}\n\n {% set result = run_query(sql) %}\n\n {% if result %}\n {{ return(true) }}\n {% else %}\n {{ return(false) }}\n {% endif %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.105522, + "supported_languages": null + }, + "macro.balboa.create_all_masking_policies": { + "name": "create_all_masking_policies", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/masking_policies/create_all_masking_policies.sql", + "original_file_path": "macros/masking_policies/create_all_masking_policies.sql", + "unique_id": "macro.balboa.create_all_masking_policies", + "macro_sql": "{% macro create_all_masking_policies() %}\n\n {# Defines the policy types and the return value #}\n {% set policies = [\n ('string' , \"********\"),\n ('float' , 0.00),\n ('number' , 0),\n ('date' , '0001-01-01 00:00:00.000' ),\n ('timestamp_tz', '0001-01-01 00:00:00.000 +0000' ),\n ('variant', (\"masked\",\"data\"))\n ] %}\n\n {% set masking_policy_db = var(\"common_masking_policy_db\") %}\n {% set masking_policy_schema = var(\"common_masking_policy_schema\") %}\n\n {% set existing_policies = ( masking_policy_list() ) %}\n\n {% for data_type, mask_value in policies %}\n\n {% set policy_name = (\"masking_policy_pii_\" ~ data_type | string) | upper %}\n {% set fully_qualified_policy_name = masking_policy_db + '.' + masking_policy_schema + '.' + policy_name %}\n\n {% if policy_name in existing_policies%}\n {% set policy_currently_in_use = masking_policy_in_use(fully_qualified_policy_name) %}\n {% else %}\n {% set policy_currently_in_use = false %}\n {% endif %}\n\n {% set result = run_query(\n create_update_masking_policy_pii(\n masking_policy_db,\n masking_policy_schema,\n data_type,\n mask_value,\n policy_currently_in_use\n )\n ) %}\n\n {% endfor %}\n\n {{ print(\"The following masking policies were created / updated: \")}}\n {{ print( masking_policy_list() ) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.balboa.masking_policy_list", + "macro.balboa.masking_policy_in_use", + "macro.dbt.run_query", + "macro.balboa.create_update_masking_policy_pii" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1078122, + "supported_languages": null + }, + "macro.balboa.test_check_critical_rows_exist_in_seed": { + "name": "test_check_critical_rows_exist_in_seed", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tests/check_critical_rows_exist_in_seed.sql", + "original_file_path": "macros/tests/check_critical_rows_exist_in_seed.sql", + "unique_id": "macro.balboa.test_check_critical_rows_exist_in_seed", + "macro_sql": "{% test check_critical_rows_exist_in_seed(model, compare_seed, compare_columns=None) %}\n\n{#-- Prevent querying of db in parsing mode. This works because this macro does not create any new refs. 
#}\n{%- if not execute -%}\n {{ return('') }}\n{% endif %}\n\n-- setup\n\n\n{#\n{%- do dbt_utils._is_relation(model, 'check_critical_rows_exist_in_seed') -%}\nThis was in the dbt_utils model I modeled this one after, but doesn't run here.\n#}\n\n{#-\nIf the compare_cols arg is provided, we can run this test without querying the\ninformation schema \u2014 this allows the model to be an ephemeral model\n-#}\n\n{%- if not compare_columns -%}\n {%- do dbt_utils._is_ephemeral(model, 'check_critical_rows_exist_in_seed') -%}\n {%- set compare_columns = adapter.get_columns_in_relation(model) | map(attribute='quoted') -%}\n{%- endif -%}\n\n{% set compare_cols_csv = compare_columns | join(', ') %}\n\nwith a as (\n\n select * from {{ model }}\n\n),\n\nb as (\n\n select * from {{ compare_seed }}\n\n),\n\nb_minus_a as (\n\n select {{compare_cols_csv}} from b\n {{ dbt_utils.except() }}\n select {{compare_cols_csv}} from a\n\n)\n\nselect * from b_minus_a\n\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils._is_ephemeral", "macro.dbt.except"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.108829, + "supported_languages": null + }, + "macro.balboa.test_expect_keys_to_exist_in_json": { + "name": "test_expect_keys_to_exist_in_json", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tests/expect_keys_to_exist_in_json.sql", + "original_file_path": "macros/tests/expect_keys_to_exist_in_json.sql", + "unique_id": "macro.balboa.test_expect_keys_to_exist_in_json", + "macro_sql": "{% test expect_keys_to_exist_in_json(model, column_name, value_set, rows_to_check=1) %}\n\nwith all_values as (\n select distinct trim(value,'\"') as value_field\n from (\n select *\n from {{ model }}\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n limit {{ rows_to_check }}\n ) source,\n lateral flatten(object_keys({{column_name}}))\n),\n\nset_values as (\n\n {% for value in value_set -%}\n select\n '{{ value }}' as value_field\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n),\n\nvalidation_errors as (\n -- values from the set that are not in the json\n select\n s.value_field\n from\n set_values s\n left join\n all_values v on v.value_field = s.value_field\n where\n v.value_field is null\n)\n\nselect *\nfrom validation_errors\n\n{% endtest %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1096709, + "supported_languages": null + }, + "macro.balboa.store_test_results": { + "name": "store_test_results", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tests/store_test_results.sql", + "original_file_path": "macros/tests/store_test_results.sql", + "unique_id": "macro.balboa.store_test_results", + "macro_sql": "{% macro store_test_results(results) %}\n {%- set test_results = [] -%}\n\n {%- for result in results if result.node.resource_type == 'test' -%}\n {%- set test_results = test_results.append(result) -%}\n {%- endfor -%}\n\n {% if test_results|length == 0 -%}\n {{ log(\"store_test_results found no test results to process.\") if execute }}\n {{ return('') }}\n {% endif -%}\n\n {%- set central_tbl -%} {{ target.schema }}.test_results_central {%- endset -%}\n {%- set history_tbl -%} {{ target.schema }}.test_results_history {%- endset -%}\n\n {{ log(\"Centralizing \" ~ test_results|length ~ \" test 
results in \" + central_tbl, info = true) if execute }}\n {{ log(test_results, info=true) }}\n create or replace table {{ central_tbl }} as (\n\n {%- for result in test_results %}\n\n {%- set test_name = '' -%}\n {%- set test_type = '' -%}\n {%- set column_name = '' -%}\n\n {%- if result.node.test_metadata is defined -%}\n {%- set test_name = result.node.test_metadata.name -%}\n {%- set test_type = 'generic' -%}\n\n {%- if test_name == 'relationships' -%}\n {%- set column_name = result.node.test_metadata.kwargs.field ~ ',' ~ result.node.test_metadata.kwargs.column_name -%}\n {%- else -%}\n {%- set column_name = result.node.test_metadata.kwargs.column_name -%}\n {%- endif -%}\n {%- elif result.node.name is defined -%}\n {%- set test_name = result.node.name -%}\n {%- set test_type = 'singular' -%}\n {%- endif %}\n\n select\n '{{ test_name }}'::text as test_name,\n '{{ result.node.config.severity }}'::text as test_severity_config,\n '{{ result.status }}'::text as test_result,\n '{{ process_refs(result.node.refs) }}'::text as model_refs,\n '{{ process_refs(result.node.sources, is_src=true) }}'::text as source_refs,\n '{{ column_name|escape }}'::text as column_names,\n '{{ result.node.name }}'::text as test_name_long,\n '{{ test_type }}'::text as test_type,\n '{{ result.execution_time }}'::text as execution_time_seconds,\n '{{ result.node.original_file_path }}'::text as file_test_defined,\n '{{ var(\"pipeline_name\", \"variable_not_set\") }}'::text as pipeline_name,\n '{{ var(\"pipeline_type\", \"variable_not_set\") }}'::text as pipeline_type,\n '{{ target.name }}'::text as dbt_cloud_target_name,\n '{{ env_var(\"DBT_CLOUD_PROJECT_ID\", \"manual\") }}'::text as _audit_project_id,\n '{{ env_var(\"DBT_CLOUD_JOB_ID\", \"manual\") }}'::text as _audit_job_id,\n '{{ env_var(\"DBT_CLOUD_RUN_ID\", \"manual\") }}'::text as _audit_run_id,\n '{{ env_var(\"DBT_CLOUD_URL\", \"https://cloud.getdbt.com/#/accounts/account_id/projects/\") }}'||_audit_project_id||'/runs/'||_audit_run_id::text as _audit_run_url,\n current_timestamp as _timestamp\n {{ \"union all\" if not loop.last }}\n\n {%- endfor %}\n\n );\n\n {% if target.name != 'default' %}\n create table if not exists {{ history_tbl }} as (\n select\n {{ dbt_utils.generate_surrogate_key([\"test_name\", \"test_result\", \"_timestamp\"]) }} as sk_id,\n *\n from {{ central_tbl }}\n where false\n );\n\n insert into {{ history_tbl }}\n select\n {{ dbt_utils.generate_surrogate_key([\"test_name\", \"test_result\", \"_timestamp\"]) }} as sk_id,\n *\n from {{ central_tbl }}\n ;\n {% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.balboa.process_refs", + "macro.dbt_utils.generate_surrogate_key" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.117867, + "supported_languages": null + }, + "macro.balboa.process_refs": { + "name": "process_refs", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/tests/store_test_results.sql", + "original_file_path": "macros/tests/store_test_results.sql", + "unique_id": "macro.balboa.process_refs", + "macro_sql": "{% macro process_refs( ref_list, is_src=false ) %}\n {% set refs = [] %}\n\n {% if ref_list is defined and ref_list|length > 0 %}\n {% for ref in ref_list %}\n {% if is_src %}\n {{ refs.append(ref|join('.')) }}\n {% else %}\n {{ refs.append(ref[0]) }}\n {% endif %}\n {% endfor %}\n\n {{ return(refs|join(',')) }}\n {% else %}\n {{ return('') }}\n {% endif %}\n{% endmacro %}", 
+ "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1187332, + "supported_languages": null + }, + "macro.balboa.create_row_access_policy_region": { + "name": "create_row_access_policy_region", + "resource_type": "macro", + "package_name": "balboa", + "path": "macros/row_access_policy/create_row_access_policy_deu_unmask.sql", + "original_file_path": "macros/row_access_policy/create_row_access_policy_deu_unmask.sql", + "unique_id": "macro.balboa.create_row_access_policy_region", + "macro_sql": "{% macro create_row_access_policy_region(node_database,node_schema) %}\n{#\n This should be run by dbt-snow-mask, but as this does not currently support row masking,\n is is manually run for now.\n\n To run:\n dbt run-operation create_row_access_policy_region --args '{node_database: prd_commercial_dw, node_schema: source_marketedge}'\n\n To apply to a table / view:\n alter [table/view] add row access policy z_policy_row_region_de on [country code column]\n\n alter view MARKETEDGE_INTERNAL add row access policy z_policy_row_region on (ISO3_COUNTRY_CODE);\n\n Policy must be dropped from table / view in order to recreate it\n alter [table/view] drop row access policy z_policy_row_region_de\n\n alter view MARKETEDGE_INTERNAL drop row access policy z_policy_row_region;\n\n#}\n\n{# Rows with removal comments should be removed when transitioning to the dbt-snow-mask approach. #}\n\n {% set create_policy_sql %} {# to remove #}\n use role securityadmin;\n create or replace row access policy {{node_database}}.{{node_schema}}.z_policy_row_region\n as (country_code string)\n returns boolean ->\n case\n when is_role_in_session('Z_POLICY_ROW_REGION_ALL') then true\n when country_code = 'DEU' AND is_role_in_session('Z_POLICY_ROW_REGION_DE') then true\n else false\n end;\n grant apply on row access policy {{node_database}}.{{node_schema}}.z_policy_row_region to role transformer_dbt;\n {% endset %} {# to remove #}\n {% do run_query(create_policy_sql) %} {# to remove #}\n {{ log(\"Created policy: \" ~ create_policy_sql, info=true) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.119582, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__get_catalog": { + "name": "snowflake__get_catalog", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/catalog.sql", + "original_file_path": "macros/catalog.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_catalog", + "macro_sql": "{% macro snowflake__get_catalog(information_schema, schemas) -%}\n {% set query %}\n with tables as (\n\n select\n table_catalog as \"table_database\",\n table_schema as \"table_schema\",\n table_name as \"table_name\",\n coalesce(table_type, 'DYNAMIC TABLE') as \"table_type\",\n comment as \"table_comment\",\n\n -- note: this is the _role_ that owns the table\n table_owner as \"table_owner\",\n\n 'Clustering Key' as \"stats:clustering_key:label\",\n clustering_key as \"stats:clustering_key:value\",\n 'The key used to cluster this table' as \"stats:clustering_key:description\",\n (clustering_key is not null) as \"stats:clustering_key:include\",\n\n 'Row Count' as \"stats:row_count:label\",\n row_count as \"stats:row_count:value\",\n 'An approximate count of rows in this table' as 
\"stats:row_count:description\",\n (row_count is not null) as \"stats:row_count:include\",\n\n 'Approximate Size' as \"stats:bytes:label\",\n bytes as \"stats:bytes:value\",\n 'Approximate size of the table as reported by Snowflake' as \"stats:bytes:description\",\n (bytes is not null) as \"stats:bytes:include\",\n\n 'Last Modified' as \"stats:last_modified:label\",\n to_varchar(convert_timezone('UTC', last_altered), 'yyyy-mm-dd HH24:MI'||'UTC') as \"stats:last_modified:value\",\n 'The timestamp for last update/change' as \"stats:last_modified:description\",\n (last_altered is not null and table_type='BASE TABLE') as \"stats:last_modified:include\"\n\n from {{ information_schema }}.tables\n where (\n {%- for schema in schemas -%}\n upper(\"table_schema\") = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n )\n\n ),\n\n columns as (\n\n select\n table_catalog as \"table_database\",\n table_schema as \"table_schema\",\n table_name as \"table_name\",\n\n column_name as \"column_name\",\n ordinal_position as \"column_index\",\n data_type as \"column_type\",\n comment as \"column_comment\"\n\n from {{ information_schema }}.columns\n where (\n {%- for schema in schemas -%}\n upper(\"table_schema\") = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n )\n )\n\n select *\n from tables\n join columns using (\"table_database\", \"table_schema\", \"table_name\")\n order by \"column_index\"\n {%- endset -%}\n\n {{ return(run_query(query)) }}\n\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.121192, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__create_table_as": { + "name": "snowflake__create_table_as", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__create_table_as", + "macro_sql": "{% macro snowflake__create_table_as(temporary, relation, compiled_code, language='sql') -%}\n {%- if language == 'sql' -%}\n {%- set transient = config.get('transient', default=true) -%}\n {%- set cluster_by_keys = config.get('cluster_by', default=none) -%}\n {%- set enable_automatic_clustering = config.get('automatic_clustering', default=false) -%}\n {%- set copy_grants = config.get('copy_grants', default=false) -%}\n\n {%- if cluster_by_keys is not none and cluster_by_keys is string -%}\n {%- set cluster_by_keys = [cluster_by_keys] -%}\n {%- endif -%}\n {%- if cluster_by_keys is not none -%}\n {%- set cluster_by_string = cluster_by_keys|join(\", \")-%}\n {% else %}\n {%- set cluster_by_string = none -%}\n {%- endif -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create or replace {% if temporary -%}\n temporary\n {%- elif transient -%}\n transient\n {%- endif %} table {{ relation }}\n {%- set contract_config = config.get('contract') -%}\n {%- if contract_config.enforced -%}\n {{ get_assert_columns_equivalent(sql) }}\n {{ get_table_columns_and_constraints() }}\n {% set compiled_code = get_select_subquery(compiled_code) %}\n {% endif %}\n {% if copy_grants and not temporary -%} copy grants {%- endif %} as\n (\n {%- if cluster_by_string is not none -%}\n select * from (\n {{ compiled_code }}\n ) order by ({{ cluster_by_string }})\n {%- else -%}\n {{ compiled_code 
}}\n {%- endif %}\n );\n {% if cluster_by_string is not none and not temporary -%}\n alter table {{relation}} cluster by ({{cluster_by_string}});\n {%- endif -%}\n {% if enable_automatic_clustering and cluster_by_string is not none and not temporary -%}\n alter table {{relation}} resume recluster;\n {%- endif -%}\n\n {%- elif language == 'python' -%}\n {{ py_write_table(compiled_code=compiled_code, target_relation=relation, temporary=temporary) }}\n {%- else -%}\n {% do exceptions.raise_compiler_error(\"snowflake__create_table_as macro didn't get supported language, it got %s\" % language) %}\n {%- endif -%}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.get_assert_columns_equivalent", + "macro.dbt.get_table_columns_and_constraints", + "macro.dbt.get_select_subquery", + "macro.dbt_snowflake.py_write_table" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1402738, + "supported_languages": null + }, + "macro.dbt_snowflake.get_column_comment_sql": { + "name": "get_column_comment_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.get_column_comment_sql", + "macro_sql": "{% macro get_column_comment_sql(column_name, column_dict) -%}\n {% if (column_name|upper in column_dict) -%}\n {% set matched_column = column_name|upper -%}\n {% elif (column_name|lower in column_dict) -%}\n {% set matched_column = column_name|lower -%}\n {% elif (column_name in column_dict) -%}\n {% set matched_column = column_name -%}\n {% else -%}\n {% set matched_column = None -%}\n {% endif -%}\n {% if matched_column -%}\n {{ adapter.quote(column_name) }} COMMENT $${{ column_dict[matched_column]['description'] | replace('$', '[$]') }}$$\n {%- else -%}\n {{ adapter.quote(column_name) }} COMMENT $$$$\n {%- endif -%}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1410549, + "supported_languages": null + }, + "macro.dbt_snowflake.get_persist_docs_column_list": { + "name": "get_persist_docs_column_list", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.get_persist_docs_column_list", + "macro_sql": "{% macro get_persist_docs_column_list(model_columns, query_columns) %}\n(\n {% for column_name in query_columns %}\n {{ get_column_comment_sql(column_name, model_columns) }}\n {{- \", \" if not loop.last else \"\" }}\n {% endfor %}\n)\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.get_column_comment_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.141349, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__create_view_as_with_temp_flag": { + "name": "snowflake__create_view_as_with_temp_flag", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__create_view_as_with_temp_flag", + "macro_sql": "{% macro snowflake__create_view_as_with_temp_flag(relation, sql, is_temporary=False) -%}\n {%- set secure = 
config.get('secure', default=false) -%}\n {%- set copy_grants = config.get('copy_grants', default=false) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n create or replace {% if secure -%}\n secure\n {%- endif %} {% if is_temporary -%}\n temporary\n {%- endif %} view {{ relation }}\n {% if config.persist_column_docs() -%}\n {% set model_columns = model.columns %}\n {% set query_columns = get_columns_in_query(sql) %}\n {{ get_persist_docs_column_list(model_columns, query_columns) }}\n\n {%- endif %}\n {%- set contract_config = config.get('contract') -%}\n {%- if contract_config.enforced -%}\n {{ get_assert_columns_equivalent(sql) }}\n {%- endif %}\n {% if copy_grants -%} copy grants {%- endif %} as (\n {{ sql }}\n );\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.get_columns_in_query", + "macro.dbt_snowflake.get_persist_docs_column_list", + "macro.dbt.get_assert_columns_equivalent" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.142365, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__create_view_as": { + "name": "snowflake__create_view_as", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__create_view_as", + "macro_sql": "{% macro snowflake__create_view_as(relation, sql) -%}\n {{ snowflake__create_view_as_with_temp_flag(relation, sql) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.snowflake__create_view_as_with_temp_flag" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.142524, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__get_columns_in_relation": { + "name": "snowflake__get_columns_in_relation", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_columns_in_relation", + "macro_sql": "{% macro snowflake__get_columns_in_relation(relation) -%}\n {%- set sql -%}\n describe table {{ relation }}\n {%- endset -%}\n {%- set result = run_query(sql) -%}\n\n {% set maximum = 10000 %}\n {% if (result | length) >= maximum %}\n {% set msg %}\n Too many columns in relation {{ relation }}! 
dbt can only get\n information about relations with fewer than {{ maximum }} columns.\n {% endset %}\n {% do exceptions.raise_compiler_error(msg) %}\n {% endif %}\n\n {% set columns = [] %}\n {% for row in result %}\n {% do columns.append(api.Column.from_description(row['name'], row['type'])) %}\n {% endfor %}\n {% do return(columns) %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.143312, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__list_schemas": { + "name": "snowflake__list_schemas", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__list_schemas", + "macro_sql": "{% macro snowflake__list_schemas(database) -%}\n {# 10k limit from here: https://docs.snowflake.net/manuals/sql-reference/sql/show-schemas.html#usage-notes #}\n {% set maximum = 10000 %}\n {% set sql -%}\n show terse schemas in database {{ database }}\n limit {{ maximum }}\n {%- endset %}\n {% set result = run_query(sql) %}\n {% if (result | length) >= maximum %}\n {% set msg %}\n Too many schemas in database {{ database }}! dbt can only get\n information about databases with fewer than {{ maximum }} schemas.\n {% endset %}\n {% do exceptions.raise_compiler_error(msg) %}\n {% endif %}\n {{ return(result) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.143908, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__get_paginated_relations_array": { + "name": "snowflake__get_paginated_relations_array", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_paginated_relations_array", + "macro_sql": "{% macro snowflake__get_paginated_relations_array(max_iter, max_results_per_iter, max_total_results, schema_relation, watermark) %}\n\n {% set paginated_relations = [] %}\n\n {% for _ in range(0, max_iter) %}\n\n {%- set paginated_sql -%}\n show terse objects in {{ schema_relation }} limit {{ max_results_per_iter }} from '{{ watermark.table_name }}'\n {%- endset -%}\n\n {%- set paginated_result = run_query(paginated_sql) %}\n {%- set paginated_n = (paginated_result | length) -%}\n\n {#\n terminating condition: if there are 0 records in the result we reached\n the end exactly on the previous iteration\n #}\n {%- if paginated_n == 0 -%}\n {%- break -%}\n {%- endif -%}\n\n {#\n terminating condition: At some point the user needs to be reasonable with how\n many objects are contained in their schemas. Since there was already\n one iteration before attempting pagination, loop.index == max_iter means\n the limit has been surpassed.\n #}\n\n {%- if loop.index == max_iter -%}\n {%- set msg -%}\n dbt will list a maximum of {{ max_total_results }} objects in schema {{ schema_relation }}.\n Your schema exceeds this limit. 
Please contact support@getdbt.com for troubleshooting tips,\n or review and reduce the number of objects contained.\n {%- endset -%}\n\n {% do exceptions.raise_compiler_error(msg) %}\n {%- endif -%}\n\n {%- do paginated_relations.append(paginated_result) -%}\n {% set watermark.table_name = paginated_result.columns[1].values()[-1] %}\n\n {#\n terminating condition: paginated_n < max_results_per_iter means we reached the end\n #}\n {%- if paginated_n < max_results_per_iter -%}\n {%- break -%}\n {%- endif -%}\n {%- endfor -%}\n\n {{ return(paginated_relations) }}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.144979, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__list_relations_without_caching": { + "name": "snowflake__list_relations_without_caching", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__list_relations_without_caching", + "macro_sql": "{% macro snowflake__list_relations_without_caching(schema_relation, max_iter=10, max_results_per_iter=10000) %}\n\n {%- set max_total_results = max_results_per_iter * max_iter -%}\n\n {%- set sql -%}\n show terse objects in {{ schema_relation }} limit {{ max_results_per_iter }}\n {%- endset -%}\n\n {%- set result = run_query(sql) -%}\n\n {%- set n = (result | length) -%}\n {%- set watermark = namespace(table_name=result.columns[1].values()[-1]) -%}\n {%- set paginated = namespace(result=[]) -%}\n\n {% if n >= max_results_per_iter %}\n\n {% set paginated.result = snowflake__get_paginated_relations_array(\n max_iter,\n max_results_per_iter,\n max_total_results,\n schema_relation,\n watermark\n )\n %}\n\n {% endif %}\n\n {%- set all_results_array = [result] + paginated.result -%}\n {%- set result = result.merge(all_results_array) -%}\n {%- do return(result) -%}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.run_query", + "macro.dbt_snowflake.snowflake__get_paginated_relations_array" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1460109, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__check_schema_exists": { + "name": "snowflake__check_schema_exists", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__check_schema_exists", + "macro_sql": "{% macro snowflake__check_schema_exists(information_schema, schema) -%}\n {% call statement('check_schema_exists', fetch_result=True) -%}\n select count(*)\n from {{ information_schema }}.schemata\n where upper(schema_name) = upper('{{ schema }}')\n and upper(catalog_name) = upper('{{ information_schema.database }}')\n {%- endcall %}\n {{ return(load_result('check_schema_exists').table) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.146361, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__rename_relation": { + "name": "snowflake__rename_relation", + "resource_type": "macro", + "package_name": 
"dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__rename_relation", + "macro_sql": "{% macro snowflake__rename_relation(from_relation, to_relation) -%}\n {% call statement('rename_relation') -%}\n alter table {{ from_relation }} rename to {{ to_relation }}\n {%- endcall %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.14657, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__alter_column_type": { + "name": "snowflake__alter_column_type", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__alter_column_type", + "macro_sql": "{% macro snowflake__alter_column_type(relation, column_name, new_column_type) -%}\n {% call statement('alter_column_type') %}\n alter table {{ relation }} alter {{ adapter.quote(column_name) }} set data type {{ new_column_type }};\n {% endcall %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.146841, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__alter_relation_comment": { + "name": "snowflake__alter_relation_comment", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__alter_relation_comment", + "macro_sql": "{% macro snowflake__alter_relation_comment(relation, relation_comment) -%}\n {%- if relation.is_dynamic_table -%}\n {%- set relation_type = 'dynamic table' -%}\n {%- else -%}\n {%- set relation_type = relation.type -%}\n {%- endif -%}\n comment on {{ relation_type }} {{ relation }} IS $${{ relation_comment | replace('$', '[$]') }}$$;\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.147198, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__alter_column_comment": { + "name": "snowflake__alter_column_comment", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__alter_column_comment", + "macro_sql": "{% macro snowflake__alter_column_comment(relation, column_dict) -%}\n {% set existing_columns = adapter.get_columns_in_relation(relation) | map(attribute=\"name\") | list %}\n {% if relation.is_dynamic_table -%}\n {% set relation_type = \"dynamic table\" %}\n {% else -%}\n {% set relation_type = relation.type %}\n {% endif %}\n alter {{ relation_type }} {{ relation }} alter\n {% for column_name in existing_columns if (column_name in existing_columns) or (column_name|lower in existing_columns) %}\n {{ get_column_comment_sql(column_name, column_dict) }} {{- ',' if not loop.last else ';' }}\n {% endfor %}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.get_column_comment_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + 
"arguments": [], + "created_at": 1705588676.1479309, + "supported_languages": null + }, + "macro.dbt_snowflake.get_current_query_tag": { + "name": "get_current_query_tag", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.get_current_query_tag", + "macro_sql": "{% macro get_current_query_tag() -%}\n {{ return(run_query(\"show parameters like 'query_tag' in session\").rows[0]['value']) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.148129, + "supported_languages": null + }, + "macro.dbt_snowflake.set_query_tag": { + "name": "set_query_tag", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.set_query_tag", + "macro_sql": "{% macro set_query_tag() -%}\n {{ return(adapter.dispatch('set_query_tag', 'dbt')()) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__set_query_tag"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1483011, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__set_query_tag": { + "name": "snowflake__set_query_tag", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__set_query_tag", + "macro_sql": "{% macro snowflake__set_query_tag() -%}\n {% set new_query_tag = config.get('query_tag') %}\n {% if new_query_tag %}\n {% set original_query_tag = get_current_query_tag() %}\n {{ log(\"Setting query_tag to '\" ~ new_query_tag ~ \"'. 
Will reset to '\" ~ original_query_tag ~ \"' after materialization.\") }}\n {% do run_query(\"alter session set query_tag = '{}'\".format(new_query_tag)) %}\n {{ return(original_query_tag)}}\n {% endif %}\n {{ return(none)}}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.get_current_query_tag", + "macro.dbt.run_query" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.148795, + "supported_languages": null + }, + "macro.dbt_snowflake.unset_query_tag": { + "name": "unset_query_tag", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.unset_query_tag", + "macro_sql": "{% macro unset_query_tag(original_query_tag) -%}\n {{ return(adapter.dispatch('unset_query_tag', 'dbt')(original_query_tag)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__unset_query_tag"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.148988, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__unset_query_tag": { + "name": "snowflake__unset_query_tag", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__unset_query_tag", + "macro_sql": "{% macro snowflake__unset_query_tag(original_query_tag) -%}\n {% set new_query_tag = config.get('query_tag') %}\n {% if new_query_tag %}\n {% if original_query_tag %}\n {{ log(\"Resetting query_tag to '\" ~ original_query_tag ~ \"'.\") }}\n {% do run_query(\"alter session set query_tag = '{}'\".format(original_query_tag)) %}\n {% else %}\n {{ log(\"No original query_tag, unsetting parameter.\") }}\n {% do run_query(\"alter session unset query_tag\") %}\n {% endif %}\n {% endif %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.149503, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__alter_relation_add_remove_columns": { + "name": "snowflake__alter_relation_add_remove_columns", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__alter_relation_add_remove_columns", + "macro_sql": "{% macro snowflake__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}\n\n {% if relation.is_dynamic_table -%}\n {% set relation_type = \"dynamic table\" %}\n {% else -%}\n {% set relation_type = relation.type %}\n {% endif %}\n\n {% if add_columns %}\n\n {% set sql -%}\n alter {{ relation_type }} {{ relation }} add column\n {% for column in add_columns %}\n {{ column.name }} {{ column.data_type }}{{ ',' if not loop.last }}\n {% endfor %}\n {%- endset -%}\n\n {% do run_query(sql) %}\n\n {% endif %}\n\n {% if remove_columns %}\n\n {% set sql -%}\n alter {{ relation_type }} {{ relation }} drop column\n {% for column in remove_columns %}\n {{ column.name }}{{ ',' if not loop.last }}\n {% endfor %}\n {%- endset -%}\n\n {% do run_query(sql) %}\n\n {% endif %}\n\n{% endmacro %}", + "depends_on": { "macros": 
["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1504261, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake_dml_explicit_transaction": { + "name": "snowflake_dml_explicit_transaction", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake_dml_explicit_transaction", + "macro_sql": "{% macro snowflake_dml_explicit_transaction(dml) %}\n {#\n Use this macro to wrap all INSERT, MERGE, UPDATE, DELETE, and TRUNCATE\n statements before passing them into run_query(), or calling in the 'main' statement\n of a materialization\n #}\n {% set dml_transaction -%}\n begin;\n {{ dml }};\n commit;\n {%- endset %}\n\n {% do return(dml_transaction) %}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1506612, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__truncate_relation": { + "name": "snowflake__truncate_relation", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__truncate_relation", + "macro_sql": "{% macro snowflake__truncate_relation(relation) -%}\n {% set truncate_dml %}\n truncate table {{ relation }}\n {% endset %}\n {% call statement('truncate_relation') -%}\n {{ snowflake_dml_explicit_transaction(truncate_dml) }}\n {%- endcall %}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.statement", + "macro.dbt_snowflake.snowflake_dml_explicit_transaction" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.150913, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__drop_relation": { + "name": "snowflake__drop_relation", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/adapters.sql", + "original_file_path": "macros/adapters.sql", + "unique_id": "macro.dbt_snowflake.snowflake__drop_relation", + "macro_sql": "{% macro snowflake__drop_relation(relation) -%}\n {%- if relation.is_dynamic_table -%}\n {% call statement('drop_relation', auto_begin=False) -%}\n drop dynamic table if exists {{ relation }}\n {%- endcall %}\n {%- else -%}\n {{- default__drop_relation(relation) -}}\n {%- endif -%}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.statement", "macro.dbt.default__drop_relation"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.151202, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__copy_grants": { + "name": "snowflake__copy_grants", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/apply_grants.sql", + "original_file_path": "macros/apply_grants.sql", + "unique_id": "macro.dbt_snowflake.snowflake__copy_grants", + "macro_sql": "{% macro snowflake__copy_grants() %}\n {% set copy_grants = config.get('copy_grants', False) %}\n {{ return(copy_grants) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + 
"patch_path": null, + "arguments": [], + "created_at": 1705588676.15152, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__support_multiple_grantees_per_dcl_statement": { + "name": "snowflake__support_multiple_grantees_per_dcl_statement", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/apply_grants.sql", + "original_file_path": "macros/apply_grants.sql", + "unique_id": "macro.dbt_snowflake.snowflake__support_multiple_grantees_per_dcl_statement", + "macro_sql": "\n\n{%- macro snowflake__support_multiple_grantees_per_dcl_statement() -%}\n {{ return(False) }}\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.151639, + "supported_languages": null + }, + "macro.dbt_snowflake.materialization_test_snowflake": { + "name": "materialization_test_snowflake", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/test.sql", + "original_file_path": "macros/materializations/test.sql", + "unique_id": "macro.dbt_snowflake.materialization_test_snowflake", + "macro_sql": "{%- materialization test, adapter='snowflake' -%}\n\n {% set original_query_tag = set_query_tag() %}\n {% set relations = materialization_test_default() %}\n {% do unset_query_tag(original_query_tag) %}\n {{ return(relations) }}\n\n{%- endmaterialization -%}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.set_query_tag", + "macro.dbt.materialization_test_default", + "macro.dbt_snowflake.unset_query_tag" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1520069, + "supported_languages": ["sql"] + }, + "macro.dbt_snowflake.snowflake__get_merge_sql": { + "name": "snowflake__get_merge_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/merge.sql", + "original_file_path": "macros/materializations/merge.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_merge_sql", + "macro_sql": "{% macro snowflake__get_merge_sql(target, source_sql, unique_key, dest_columns, incremental_predicates) -%}\n\n {#\n Workaround for Snowflake not being happy with a merge on a constant-false predicate.\n When no unique_key is provided, this macro will do a regular insert. 
If a unique_key\n is provided, then this macro will do a proper merge instead.\n #}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute='name')) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {%- set dml -%}\n {%- if unique_key is none -%}\n\n {{ sql_header if sql_header is not none }}\n\n insert into {{ target }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ source_sql }}\n )\n\n {%- else -%}\n\n {{ default__get_merge_sql(target, source_sql, unique_key, dest_columns, incremental_predicates) }}\n\n {%- endif -%}\n {%- endset -%}\n\n {% do return(snowflake_dml_explicit_transaction(dml)) %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.get_quoted_csv", + "macro.dbt.default__get_merge_sql", + "macro.dbt_snowflake.snowflake_dml_explicit_transaction" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.153291, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__get_delete_insert_merge_sql": { + "name": "snowflake__get_delete_insert_merge_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/merge.sql", + "original_file_path": "macros/materializations/merge.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_delete_insert_merge_sql", + "macro_sql": "{% macro snowflake__get_delete_insert_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) %}\n {% set dml = default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) %}\n {% do return(snowflake_dml_explicit_transaction(dml)) %}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.default__get_delete_insert_merge_sql", + "macro.dbt_snowflake.snowflake_dml_explicit_transaction" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1536179, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__snapshot_merge_sql": { + "name": "snowflake__snapshot_merge_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/merge.sql", + "original_file_path": "macros/materializations/merge.sql", + "unique_id": "macro.dbt_snowflake.snowflake__snapshot_merge_sql", + "macro_sql": "{% macro snowflake__snapshot_merge_sql(target, source, insert_cols) %}\n {% set dml = default__snapshot_merge_sql(target, source, insert_cols) %}\n {% do return(snowflake_dml_explicit_transaction(dml)) %}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.default__snapshot_merge_sql", + "macro.dbt_snowflake.snowflake_dml_explicit_transaction" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.153887, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__load_csv_rows": { + "name": "snowflake__load_csv_rows", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/seed.sql", + "original_file_path": "macros/materializations/seed.sql", + "unique_id": "macro.dbt_snowflake.snowflake__load_csv_rows", + "macro_sql": "{% macro snowflake__load_csv_rows(model, agate_table) %}\n {% set batch_size = get_batch_size() %}\n {% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}\n {% set bindings = [] %}\n\n {% set 
statements = [] %}\n\n {% for chunk in agate_table.rows | batch(batch_size) %}\n {% set bindings = [] %}\n\n {% for row in chunk %}\n {% do bindings.extend(row) %}\n {% endfor %}\n\n {% set sql %}\n insert into {{ this.render() }} ({{ cols_sql }}) values\n {% for row in chunk -%}\n ({%- for column in agate_table.column_names -%}\n %s\n {%- if not loop.last%},{%- endif %}\n {%- endfor -%})\n {%- if not loop.last%},{%- endif %}\n {%- endfor %}\n {% endset %}\n\n {% do adapter.add_query('BEGIN', auto_begin=False) %}\n {% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}\n {% do adapter.add_query('COMMIT', auto_begin=False) %}\n\n {% if loop.index0 == 0 %}\n {% do statements.append(sql) %}\n {% endif %}\n {% endfor %}\n\n {# Return SQL so we can render it out into the compiled files #}\n {{ return(statements[0]) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.get_batch_size", + "macro.dbt.get_seed_column_quoted_csv" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.156275, + "supported_languages": null + }, + "macro.dbt_snowflake.materialization_seed_snowflake": { + "name": "materialization_seed_snowflake", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/seed.sql", + "original_file_path": "macros/materializations/seed.sql", + "unique_id": "macro.dbt_snowflake.materialization_seed_snowflake", + "macro_sql": "{% materialization seed, adapter='snowflake' %}\n {% set original_query_tag = set_query_tag() %}\n\n {% set relations = materialization_seed_default() %}\n\n {% do unset_query_tag(original_query_tag) %}\n\n {{ return(relations) }}\n{% endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.set_query_tag", + "macro.dbt.materialization_seed_default", + "macro.dbt_snowflake.unset_query_tag" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1566029, + "supported_languages": ["sql"] + }, + "macro.dbt_snowflake.materialization_view_snowflake": { + "name": "materialization_view_snowflake", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/view.sql", + "original_file_path": "macros/materializations/view.sql", + "unique_id": "macro.dbt_snowflake.materialization_view_snowflake", + "macro_sql": "{% materialization view, adapter='snowflake' -%}\n\n {% set original_query_tag = set_query_tag() %}\n {% set to_return = create_or_replace_view() %}\n\n {% set target_relation = this.incorporate(type='view') %}\n\n {% do persist_docs(target_relation, model, for_columns=false) %}\n\n {% do unset_query_tag(original_query_tag) %}\n\n {% do return(to_return) %}\n\n{%- endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.set_query_tag", + "macro.dbt.create_or_replace_view", + "macro.dbt.persist_docs", + "macro.dbt_snowflake.unset_query_tag" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.157187, + "supported_languages": ["sql"] + }, + "macro.dbt_snowflake.materialization_table_snowflake": { + "name": "materialization_table_snowflake", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/table.sql", + "original_file_path": "macros/materializations/table.sql", + 
"unique_id": "macro.dbt_snowflake.materialization_table_snowflake", + "macro_sql": "{% materialization table, adapter='snowflake', supported_languages=['sql', 'python']%}\n\n {% set original_query_tag = set_query_tag() %}\n\n {%- set identifier = model['alias'] -%}\n {%- set language = model['language'] -%}\n\n {% set grant_config = config.get('grants') %}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n {%- set target_relation = api.Relation.create(identifier=identifier,\n schema=schema,\n database=database, type='table') -%}\n\n {{ run_hooks(pre_hooks) }}\n\n {#-- Drop the relation if it was a view to \"convert\" it in a table. This may lead to\n -- downtime, but it should be a relatively infrequent occurrence #}\n {% if old_relation is not none and not old_relation.is_table %}\n {{ log(\"Dropping relation \" ~ old_relation ~ \" because it is of type \" ~ old_relation.type) }}\n {{ drop_relation_if_exists(old_relation) }}\n {% endif %}\n\n {% call statement('main', language=language) -%}\n {{ create_table_as(False, target_relation, compiled_code, language) }}\n {%- endcall %}\n\n {{ run_hooks(post_hooks) }}\n\n {% set should_revoke = should_revoke(old_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% do unset_query_tag(original_query_tag) %}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.set_query_tag", + "macro.dbt.run_hooks", + "macro.dbt.drop_relation_if_exists", + "macro.dbt.statement", + "macro.dbt.create_table_as", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants", + "macro.dbt.persist_docs", + "macro.dbt_snowflake.unset_query_tag" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1598048, + "supported_languages": ["sql", "python"] + }, + "macro.dbt_snowflake.py_write_table": { + "name": "py_write_table", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/table.sql", + "original_file_path": "macros/materializations/table.sql", + "unique_id": "macro.dbt_snowflake.py_write_table", + "macro_sql": "{% macro py_write_table(compiled_code, target_relation, temporary=False) %}\n{{ compiled_code }}\ndef materialize(session, df, target_relation):\n # make sure pandas exists\n import importlib.util\n package_name = 'pandas'\n if importlib.util.find_spec(package_name):\n import pandas\n if isinstance(df, pandas.core.frame.DataFrame):\n session.use_database(target_relation.database)\n session.use_schema(target_relation.schema)\n # session.write_pandas does not have overwrite function\n df = session.createDataFrame(df)\n {% set target_relation_name = resolve_model_name(target_relation) %}\n df.write.mode(\"overwrite\").save_as_table('{{ target_relation_name }}', create_temp_table={{temporary}})\n\ndef main(session):\n dbt = dbtObj(session.table)\n df = model(dbt, session)\n materialize(session, df, dbt.this)\n return \"OK\"\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.resolve_model_name"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.160117, + "supported_languages": null + }, + "macro.dbt_snowflake.py_script_comment": { + "name": 
"py_script_comment", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/table.sql", + "original_file_path": "macros/materializations/table.sql", + "unique_id": "macro.dbt_snowflake.py_script_comment", + "macro_sql": "{% macro py_script_comment()%}\n# To run this in snowsight, you need to select entry point to be main\n# And you may have to modify the return type to text to get the result back\n# def main(session):\n# dbt = dbtObj(session.table)\n# df = model(dbt, session)\n# return df.collect()\n\n# to run this in local notebook, you need to create a session following examples https://github.com/Snowflake-Labs/sfguide-getting-started-snowpark-python\n# then you can do the following to run model\n# dbt = dbtObj(session.table)\n# df = model(dbt, session)\n{%endmacro%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1602309, + "supported_languages": null + }, + "macro.dbt_snowflake.dbt_snowflake_get_tmp_relation_type": { + "name": "dbt_snowflake_get_tmp_relation_type", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/incremental.sql", + "original_file_path": "macros/materializations/incremental.sql", + "unique_id": "macro.dbt_snowflake.dbt_snowflake_get_tmp_relation_type", + "macro_sql": "{% macro dbt_snowflake_get_tmp_relation_type(strategy, unique_key, language) %}\n{%- set tmp_relation_type = config.get('tmp_relation_type') -%}\n /* {#\n High-level principles:\n If we are running multiple statements (DELETE + INSERT),\n and we want to guarantee identical inputs to both statements,\n then we must first save the model query results as a temporary table\n (which presumably comes with a performance cost).\n If we are running a single statement (MERGE or INSERT alone),\n we _may_ save the model query definition as a view instead,\n for (presumably) faster overall incremental processing.\n\n Low-level specifics:\n If an invalid option is specified, then we will raise an\n excpetion with corresponding message.\n\n Languages other than SQL (like Python) will use a temporary table.\n With the default strategy of merge, the user may choose between a temporary\n table and view (defaulting to view).\n\n The append strategy can use a view because it will run a single INSERT statement.\n\n When unique_key is none, the delete+insert strategy can use a view beacuse a\n single INSERT statement is run with no DELETES as part of the statement.\n Otherwise, play it safe by using a temporary table.\n #} */\n\n {% if language == \"python\" and tmp_relation_type is not none %}\n {% do exceptions.raise_compiler_error(\n \"Python models currently only support 'table' for tmp_relation_type but \"\n ~ tmp_relation_type ~ \" was specified.\"\n ) %}\n {% endif %}\n\n {% if strategy == \"delete+insert\" and tmp_relation_type is not none and tmp_relation_type != \"table\" and unique_key is not none %}\n {% do exceptions.raise_compiler_error(\n \"In order to maintain consistent results when `unique_key` is not none,\n the `delete+insert` strategy only supports `table` for `tmp_relation_type` but \"\n ~ tmp_relation_type ~ \" was specified.\"\n )\n %}\n {% endif %}\n\n {% if language != \"sql\" %}\n {{ return(\"table\") }}\n {% elif tmp_relation_type == \"table\" %}\n {{ return(\"table\") }}\n {% elif tmp_relation_type == \"view\" %}\n {{ return(\"view\") }}\n {% elif strategy in 
(\"default\", \"merge\", \"append\") %}\n {{ return(\"view\") }}\n {% elif strategy == \"delete+insert\" and unique_key is none %}\n {{ return(\"view\") }}\n {% else %}\n {{ return(\"table\") }}\n {% endif %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.163136, + "supported_languages": null + }, + "macro.dbt_snowflake.materialization_incremental_snowflake": { + "name": "materialization_incremental_snowflake", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/incremental.sql", + "original_file_path": "macros/materializations/incremental.sql", + "unique_id": "macro.dbt_snowflake.materialization_incremental_snowflake", + "macro_sql": "{% materialization incremental, adapter='snowflake', supported_languages=['sql', 'python'] -%}\n\n {% set original_query_tag = set_query_tag() %}\n\n {#-- Set vars --#}\n {%- set full_refresh_mode = (should_full_refresh()) -%}\n {%- set language = model['language'] -%}\n {% set target_relation = this %}\n {% set existing_relation = load_relation(this) %}\n\n {#-- The temp relation will be a view (faster) or temp table, depending on upsert/merge strategy --#}\n {%- set unique_key = config.get('unique_key') -%}\n {% set incremental_strategy = config.get('incremental_strategy') or 'default' %}\n {% set tmp_relation_type = dbt_snowflake_get_tmp_relation_type(incremental_strategy, unique_key, language) %}\n {% set tmp_relation = make_temp_relation(this).incorporate(type=tmp_relation_type) %}\n\n {% set grant_config = config.get('grants') %}\n\n {% set on_schema_change = incremental_validate_on_schema_change(config.get('on_schema_change'), default='ignore') %}\n\n {{ run_hooks(pre_hooks) }}\n\n {% if existing_relation is none %}\n {%- call statement('main', language=language) -%}\n {{ create_table_as(False, target_relation, compiled_code, language) }}\n {%- endcall -%}\n\n {% elif existing_relation.is_view %}\n {#-- Can't overwrite a view with a table - we must drop --#}\n {{ log(\"Dropping relation \" ~ target_relation ~ \" because it is a view and this model is a table.\") }}\n {% do adapter.drop_relation(existing_relation) %}\n {%- call statement('main', language=language) -%}\n {{ create_table_as(False, target_relation, compiled_code, language) }}\n {%- endcall -%}\n {% elif full_refresh_mode %}\n {%- call statement('main', language=language) -%}\n {{ create_table_as(False, target_relation, compiled_code, language) }}\n {%- endcall -%}\n\n {% else %}\n {#-- Create the temp relation, either as a view or as a temp table --#}\n {% if tmp_relation_type == 'view' %}\n {%- call statement('create_tmp_relation') -%}\n {{ snowflake__create_view_as_with_temp_flag(tmp_relation, compiled_code, True) }}\n {%- endcall -%}\n {% else %}\n {%- call statement('create_tmp_relation', language=language) -%}\n {{ create_table_as(True, tmp_relation, compiled_code, language) }}\n {%- endcall -%}\n {% endif %}\n\n {% do adapter.expand_target_column_types(\n from_relation=tmp_relation,\n to_relation=target_relation) %}\n {#-- Process schema changes. Returns dict of changes if successful. 
Use source columns for upserting/merging --#}\n {% set dest_columns = process_schema_changes(on_schema_change, tmp_relation, existing_relation) %}\n {% if not dest_columns %}\n {% set dest_columns = adapter.get_columns_in_relation(existing_relation) %}\n {% endif %}\n\n {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#}\n {% set incremental_predicates = config.get('predicates', none) or config.get('incremental_predicates', none) %}\n {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %}\n {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': tmp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'incremental_predicates': incremental_predicates }) %}\n\n {%- call statement('main') -%}\n {{ strategy_sql_macro_func(strategy_arg_dict) }}\n {%- endcall -%}\n {% endif %}\n\n {% do drop_relation_if_exists(tmp_relation) %}\n\n {{ run_hooks(post_hooks) }}\n\n {% set target_relation = target_relation.incorporate(type='table') %}\n\n {% set should_revoke =\n should_revoke(existing_relation.is_table, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% do unset_query_tag(original_query_tag) %}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.set_query_tag", + "macro.dbt.should_full_refresh", + "macro.dbt.load_relation", + "macro.dbt_snowflake.dbt_snowflake_get_tmp_relation_type", + "macro.dbt.make_temp_relation", + "macro.dbt.incremental_validate_on_schema_change", + "macro.dbt.run_hooks", + "macro.dbt.statement", + "macro.dbt.create_table_as", + "macro.dbt_snowflake.snowflake__create_view_as_with_temp_flag", + "macro.dbt.process_schema_changes", + "macro.dbt.drop_relation_if_exists", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants", + "macro.dbt.persist_docs", + "macro.dbt_snowflake.unset_query_tag" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.166732, + "supported_languages": ["sql", "python"] + }, + "macro.dbt_snowflake.snowflake__get_incremental_default_sql": { + "name": "snowflake__get_incremental_default_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/incremental.sql", + "original_file_path": "macros/materializations/incremental.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_incremental_default_sql", + "macro_sql": "{% macro snowflake__get_incremental_default_sql(arg_dict) %}\n {{ return(get_incremental_merge_sql(arg_dict)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_incremental_merge_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.166935, + "supported_languages": null + }, + "macro.dbt_snowflake.materialization_snapshot_snowflake": { + "name": "materialization_snapshot_snowflake", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/snapshot.sql", + "original_file_path": "macros/materializations/snapshot.sql", + "unique_id": "macro.dbt_snowflake.materialization_snapshot_snowflake", + "macro_sql": "{% materialization snapshot, adapter='snowflake' %}\n {% set original_query_tag = set_query_tag() %}\n {% set 
relations = materialization_snapshot_default() %}\n\n {% do unset_query_tag(original_query_tag) %}\n\n {{ return(relations) }}\n{% endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.set_query_tag", + "macro.dbt.materialization_snapshot_default", + "macro.dbt_snowflake.unset_query_tag" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1673348, + "supported_languages": ["sql"] + }, + "macro.dbt_snowflake.snowflake__can_clone_table": { + "name": "snowflake__can_clone_table", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/clone.sql", + "original_file_path": "macros/materializations/clone.sql", + "unique_id": "macro.dbt_snowflake.snowflake__can_clone_table", + "macro_sql": "{% macro snowflake__can_clone_table() %}\n {{ return(True) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.167609, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__create_or_replace_clone": { + "name": "snowflake__create_or_replace_clone", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/clone.sql", + "original_file_path": "macros/materializations/clone.sql", + "unique_id": "macro.dbt_snowflake.snowflake__create_or_replace_clone", + "macro_sql": "{% macro snowflake__create_or_replace_clone(this_relation, defer_relation) %}\n create or replace\n {{ \"transient\" if config.get(\"transient\", true) }}\n table {{ this_relation }}\n clone {{ defer_relation }}\n {{ \"copy grants\" if config.get(\"copy_grants\", false) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.167936, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__get_alter_dynamic_table_as_sql": { + "name": "snowflake__get_alter_dynamic_table_as_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/ddl.sql", + "original_file_path": "macros/materializations/dynamic_table/ddl.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_alter_dynamic_table_as_sql", + "macro_sql": "{% macro snowflake__get_alter_dynamic_table_as_sql(\n target_relation,\n configuration_changes,\n sql,\n existing_relation,\n backup_relation,\n intermediate_relation\n) -%}\n {{- log('Applying ALTER to: ' ~ target_relation) -}}\n\n {% if configuration_changes.requires_full_refresh %}\n {{- snowflake__get_replace_dynamic_table_as_sql(target_relation, sql, existing_relation, backup_relation, intermediate_relation) -}}\n\n {% else %}\n\n {%- set target_lag = configuration_changes.target_lag -%}\n {%- if target_lag -%}{{- log('Applying UPDATE TARGET_LAG to: ' ~ existing_relation) -}}{%- endif -%}\n {%- set snowflake_warehouse = configuration_changes.snowflake_warehouse -%}\n {%- if snowflake_warehouse -%}{{- log('Applying UPDATE WAREHOUSE to: ' ~ existing_relation) -}}{%- endif -%}\n\n alter dynamic table {{ existing_relation }} set\n {% if target_lag %}target_lag = '{{ target_lag.context }}'{% endif %}\n {% if snowflake_warehouse %}warehouse = {{ snowflake_warehouse.context }}{% endif %}\n\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { + "macros": [ + 
"macro.dbt_snowflake.snowflake__get_replace_dynamic_table_as_sql" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.171189, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__get_create_dynamic_table_as_sql": { + "name": "snowflake__get_create_dynamic_table_as_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/ddl.sql", + "original_file_path": "macros/materializations/dynamic_table/ddl.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_create_dynamic_table_as_sql", + "macro_sql": "{% macro snowflake__get_create_dynamic_table_as_sql(relation, sql) -%}\n {{- log('Applying CREATE to: ' ~ relation) -}}\n\n create or replace dynamic table {{ relation }}\n target_lag = '{{ config.get(\"target_lag\") }}'\n warehouse = {{ config.get(\"snowflake_warehouse\") }}\n as (\n {{ sql }}\n )\n ;\n {{ snowflake__refresh_dynamic_table(relation) }}\n\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__refresh_dynamic_table"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1715462, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__describe_dynamic_table": { + "name": "snowflake__describe_dynamic_table", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/ddl.sql", + "original_file_path": "macros/materializations/dynamic_table/ddl.sql", + "unique_id": "macro.dbt_snowflake.snowflake__describe_dynamic_table", + "macro_sql": "{% macro snowflake__describe_dynamic_table(relation) %}\n {%- set _dynamic_table_sql -%}\n show dynamic tables\n like '{{ relation.identifier }}'\n in schema {{ relation.database }}.{{ relation.schema }}\n ;\n select\n \"name\",\n \"schema_name\",\n \"database_name\",\n \"text\",\n \"target_lag\",\n \"warehouse\"\n from table(result_scan(last_query_id()))\n {%- endset %}\n {% set _dynamic_table = run_query(_dynamic_table_sql) %}\n\n {% do return({'dynamic_table': _dynamic_table}) %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.171936, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__get_replace_dynamic_table_as_sql": { + "name": "snowflake__get_replace_dynamic_table_as_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/ddl.sql", + "original_file_path": "macros/materializations/dynamic_table/ddl.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_replace_dynamic_table_as_sql", + "macro_sql": "{% macro snowflake__get_replace_dynamic_table_as_sql(target_relation, sql, existing_relation, backup_relation, intermediate_relation) -%}\n {{- log('Applying REPLACE to: ' ~ target_relation) -}}\n {{ snowflake__get_drop_dynamic_table_sql(existing_relation) }};\n {{ snowflake__get_create_dynamic_table_as_sql(target_relation, sql) }}\n{%- endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.snowflake__get_drop_dynamic_table_sql", + "macro.dbt_snowflake.snowflake__get_create_dynamic_table_as_sql" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + 
"arguments": [], + "created_at": 1705588676.172456, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__refresh_dynamic_table": { + "name": "snowflake__refresh_dynamic_table", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/ddl.sql", + "original_file_path": "macros/materializations/dynamic_table/ddl.sql", + "unique_id": "macro.dbt_snowflake.snowflake__refresh_dynamic_table", + "macro_sql": "{% macro snowflake__refresh_dynamic_table(relation) -%}\n {{- log('Applying REFRESH to: ' ~ relation) -}}\n\n alter dynamic table {{ relation }} refresh\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.172673, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__get_dynamic_table_configuration_changes": { + "name": "snowflake__get_dynamic_table_configuration_changes", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/ddl.sql", + "original_file_path": "macros/materializations/dynamic_table/ddl.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_dynamic_table_configuration_changes", + "macro_sql": "{% macro snowflake__get_dynamic_table_configuration_changes(existing_relation, new_config) -%}\n {% set _existing_dynamic_table = snowflake__describe_dynamic_table(existing_relation) %}\n {% set _configuration_changes = existing_relation.dynamic_table_config_changeset(_existing_dynamic_table, new_config) %}\n {% do return(_configuration_changes) %}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__describe_dynamic_table"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1729789, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__get_drop_dynamic_table_sql": { + "name": "snowflake__get_drop_dynamic_table_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/ddl.sql", + "original_file_path": "macros/materializations/dynamic_table/ddl.sql", + "unique_id": "macro.dbt_snowflake.snowflake__get_drop_dynamic_table_sql", + "macro_sql": "{% macro snowflake__get_drop_dynamic_table_sql(relation) %}\n drop dynamic table if exists {{ relation }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.173096, + "supported_languages": null + }, + "macro.dbt_snowflake.materialization_dynamic_table_snowflake": { + "name": "materialization_dynamic_table_snowflake", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/materialization.sql", + "original_file_path": "macros/materializations/dynamic_table/materialization.sql", + "unique_id": "macro.dbt_snowflake.materialization_dynamic_table_snowflake", + "macro_sql": "{% materialization dynamic_table, adapter='snowflake' %}\n\n {% set original_query_tag = set_query_tag() %}\n\n {% set existing_relation = load_cached_relation(this) %}\n {% set target_relation = this.incorporate(type=this.DynamicTable) %}\n {% set intermediate_relation = make_intermediate_relation(target_relation) %}\n {% set backup_relation_type = target_relation.DynamicTable if 
existing_relation is none else existing_relation.type %}\n {% set backup_relation = make_backup_relation(target_relation, backup_relation_type) %}\n\n {{ dynamic_table_setup(backup_relation, intermediate_relation, pre_hooks) }}\n\n {% set build_sql = dynamic_table_get_build_sql(existing_relation, target_relation, backup_relation, intermediate_relation) %}\n\n {% if build_sql == '' %}\n {{ dynamic_table_execute_no_op(target_relation) }}\n {% else %}\n {{ dynamic_table_execute_build_sql(build_sql, existing_relation, target_relation, post_hooks) }}\n {% endif %}\n\n {{ dynamic_table_teardown(backup_relation, intermediate_relation, post_hooks) }}\n\n {% do unset_query_tag(original_query_tag) %}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.set_query_tag", + "macro.dbt.load_cached_relation", + "macro.dbt.make_intermediate_relation", + "macro.dbt.make_backup_relation", + "macro.dbt_snowflake.dynamic_table_setup", + "macro.dbt_snowflake.dynamic_table_get_build_sql", + "macro.dbt_snowflake.dynamic_table_execute_no_op", + "macro.dbt_snowflake.dynamic_table_execute_build_sql", + "macro.dbt_snowflake.dynamic_table_teardown", + "macro.dbt_snowflake.unset_query_tag" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1784918, + "supported_languages": ["sql"] + }, + "macro.dbt_snowflake.dynamic_table_setup": { + "name": "dynamic_table_setup", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/materialization.sql", + "original_file_path": "macros/materializations/dynamic_table/materialization.sql", + "unique_id": "macro.dbt_snowflake.dynamic_table_setup", + "macro_sql": "{% macro dynamic_table_setup(backup_relation, intermediate_relation, pre_hooks) %}\n\n -- backup_relation and intermediate_relation should not already exist in the database\n -- it's possible these exist because of a previous run that exited unexpectedly\n {% set preexisting_backup_relation = load_cached_relation(backup_relation) %}\n {% set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) %}\n\n -- drop the temp relations if they exist already in the database\n {{ snowflake__get_drop_dynamic_table_sql(preexisting_backup_relation) }}\n {{ snowflake__get_drop_dynamic_table_sql(preexisting_intermediate_relation) }}\n\n {{ run_hooks(pre_hooks) }}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.load_cached_relation", + "macro.dbt_snowflake.snowflake__get_drop_dynamic_table_sql", + "macro.dbt.run_hooks" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.178883, + "supported_languages": null + }, + "macro.dbt_snowflake.dynamic_table_teardown": { + "name": "dynamic_table_teardown", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/materialization.sql", + "original_file_path": "macros/materializations/dynamic_table/materialization.sql", + "unique_id": "macro.dbt_snowflake.dynamic_table_teardown", + "macro_sql": "{% macro dynamic_table_teardown(backup_relation, intermediate_relation, post_hooks) %}\n\n -- drop the temp relations if they exist to leave the database clean for the next run\n {{ snowflake__get_drop_dynamic_table_sql(backup_relation) }}\n {{ 
snowflake__get_drop_dynamic_table_sql(intermediate_relation) }}\n\n {{ run_hooks(post_hooks) }}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.snowflake__get_drop_dynamic_table_sql", + "macro.dbt.run_hooks" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.17913, + "supported_languages": null + }, + "macro.dbt_snowflake.dynamic_table_get_build_sql": { + "name": "dynamic_table_get_build_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/materialization.sql", + "original_file_path": "macros/materializations/dynamic_table/materialization.sql", + "unique_id": "macro.dbt_snowflake.dynamic_table_get_build_sql", + "macro_sql": "{% macro dynamic_table_get_build_sql(existing_relation, target_relation, backup_relation, intermediate_relation) %}\n\n {% set full_refresh_mode = should_full_refresh() %}\n\n -- determine the scenario we're in: create, full_refresh, alter, refresh data\n {% if existing_relation is none %}\n {% set build_sql = snowflake__get_create_dynamic_table_as_sql(target_relation, sql) %}\n {% elif full_refresh_mode or not existing_relation.is_dynamic_table %}\n {% set build_sql = snowflake__get_replace_dynamic_table_as_sql(target_relation, sql, existing_relation, backup_relation, intermediate_relation) %}\n {% else %}\n\n -- get config options\n {% set on_configuration_change = config.get('on_configuration_change') %}\n {% set configuration_changes = snowflake__get_dynamic_table_configuration_changes(existing_relation, config) %}\n\n {% if configuration_changes is none %}\n {% set build_sql = '' %}\n {{ exceptions.warn(\"No configuration changes were identified on: `\" ~ target_relation ~ \"`. 
Continuing.\") }}\n\n {% elif on_configuration_change == 'apply' %}\n {% set build_sql = snowflake__get_alter_dynamic_table_as_sql(target_relation, configuration_changes, sql, existing_relation, backup_relation, intermediate_relation) %}\n {% elif on_configuration_change == 'continue' %}\n {% set build_sql = '' %}\n {{ exceptions.warn(\"Configuration changes were identified and `on_configuration_change` was set to `continue` for `\" ~ target_relation ~ \"`\") }}\n {% elif on_configuration_change == 'fail' %}\n {{ exceptions.raise_fail_fast_error(\"Configuration changes were identified and `on_configuration_change` was set to `fail` for `\" ~ target_relation ~ \"`\") }}\n\n {% else %}\n -- this only happens if the user provides a value other than `apply`, 'continue', 'fail'\n {{ exceptions.raise_compiler_error(\"Unexpected configuration scenario: `\" ~ on_configuration_change ~ \"`\") }}\n\n {% endif %}\n\n {% endif %}\n\n {% do return(build_sql) %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.should_full_refresh", + "macro.dbt_snowflake.snowflake__get_create_dynamic_table_as_sql", + "macro.dbt_snowflake.snowflake__get_replace_dynamic_table_as_sql", + "macro.dbt_snowflake.snowflake__get_dynamic_table_configuration_changes", + "macro.dbt_snowflake.snowflake__get_alter_dynamic_table_as_sql" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1806989, + "supported_languages": null + }, + "macro.dbt_snowflake.dynamic_table_execute_no_op": { + "name": "dynamic_table_execute_no_op", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/materialization.sql", + "original_file_path": "macros/materializations/dynamic_table/materialization.sql", + "unique_id": "macro.dbt_snowflake.dynamic_table_execute_no_op", + "macro_sql": "{% macro dynamic_table_execute_no_op(target_relation) %}\n {% do store_raw_result(\n name=\"main\",\n message=\"skip \" ~ target_relation,\n code=\"skip\",\n rows_affected=\"-1\"\n ) %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.18095, + "supported_languages": null + }, + "macro.dbt_snowflake.dynamic_table_execute_build_sql": { + "name": "dynamic_table_execute_build_sql", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/materializations/dynamic_table/materialization.sql", + "original_file_path": "macros/materializations/dynamic_table/materialization.sql", + "unique_id": "macro.dbt_snowflake.dynamic_table_execute_build_sql", + "macro_sql": "{% macro dynamic_table_execute_build_sql(build_sql, existing_relation, target_relation, post_hooks) %}\n\n {% set grant_config = config.get('grants') %}\n\n {% call statement(name=\"main\") %}\n {{ build_sql }}\n {% endcall %}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.statement", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants", + "macro.dbt.persist_docs" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.181485, + 
"supported_languages": null + }, + "macro.dbt_snowflake.snowflake__current_timestamp": { + "name": "snowflake__current_timestamp", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/utils/timestamps.sql", + "original_file_path": "macros/utils/timestamps.sql", + "unique_id": "macro.dbt_snowflake.snowflake__current_timestamp", + "macro_sql": "{% macro snowflake__current_timestamp() -%}\n convert_timezone('UTC', current_timestamp())\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.181809, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__snapshot_string_as_time": { + "name": "snowflake__snapshot_string_as_time", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/utils/timestamps.sql", + "original_file_path": "macros/utils/timestamps.sql", + "unique_id": "macro.dbt_snowflake.snowflake__snapshot_string_as_time", + "macro_sql": "{% macro snowflake__snapshot_string_as_time(timestamp) -%}\n {%- set result = \"to_timestamp_ntz('\" ~ timestamp ~ \"')\" -%}\n {{ return(result) }}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1820009, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__snapshot_get_time": { + "name": "snowflake__snapshot_get_time", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/utils/timestamps.sql", + "original_file_path": "macros/utils/timestamps.sql", + "unique_id": "macro.dbt_snowflake.snowflake__snapshot_get_time", + "macro_sql": "{% macro snowflake__snapshot_get_time() -%}\n to_timestamp_ntz({{ current_timestamp() }})\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.current_timestamp"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.18221, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__current_timestamp_backcompat": { + "name": "snowflake__current_timestamp_backcompat", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/utils/timestamps.sql", + "original_file_path": "macros/utils/timestamps.sql", + "unique_id": "macro.dbt_snowflake.snowflake__current_timestamp_backcompat", + "macro_sql": "{% macro snowflake__current_timestamp_backcompat() %}\n current_timestamp::{{ type_timestamp() }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_expectations.type_timestamp"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.182401, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__current_timestamp_in_utc_backcompat": { + "name": "snowflake__current_timestamp_in_utc_backcompat", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/utils/timestamps.sql", + "original_file_path": "macros/utils/timestamps.sql", + "unique_id": "macro.dbt_snowflake.snowflake__current_timestamp_in_utc_backcompat", + "macro_sql": "{% macro snowflake__current_timestamp_in_utc_backcompat() %}\n convert_timezone('UTC', {{ snowflake__current_timestamp_backcompat() }})::{{ type_timestamp() }}\n{% endmacro %}", + "depends_on": { + "macros": [ + 
"macro.dbt_snowflake.snowflake__current_timestamp_backcompat", + "macro.dbt_expectations.type_timestamp" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.182571, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__escape_single_quotes": { + "name": "snowflake__escape_single_quotes", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/utils/escape_single_quotes.sql", + "original_file_path": "macros/utils/escape_single_quotes.sql", + "unique_id": "macro.dbt_snowflake.snowflake__escape_single_quotes", + "macro_sql": "{% macro snowflake__escape_single_quotes(expression) -%}\n{{ expression | replace(\"'\", \"\\\\'\") }}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.18282, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__right": { + "name": "snowflake__right", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/utils/right.sql", + "original_file_path": "macros/utils/right.sql", + "unique_id": "macro.dbt_snowflake.snowflake__right", + "macro_sql": "{% macro snowflake__right(string_text, length_expression) %}\n\n case when {{ length_expression }} = 0\n then ''\n else\n right(\n {{ string_text }},\n {{ length_expression }}\n )\n end\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.183084, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__safe_cast": { + "name": "snowflake__safe_cast", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/utils/safe_cast.sql", + "original_file_path": "macros/utils/safe_cast.sql", + "unique_id": "macro.dbt_snowflake.snowflake__safe_cast", + "macro_sql": "{% macro snowflake__safe_cast(field, type) %}\n try_cast({{field}} as {{type}})\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1832662, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__bool_or": { + "name": "snowflake__bool_or", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/utils/bool_or.sql", + "original_file_path": "macros/utils/bool_or.sql", + "unique_id": "macro.dbt_snowflake.snowflake__bool_or", + "macro_sql": "{% macro snowflake__bool_or(expression) -%}\n\n boolor_agg({{ expression }})\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.183407, + "supported_languages": null + }, + "macro.dbt_snowflake.snowflake__array_construct": { + "name": "snowflake__array_construct", + "resource_type": "macro", + "package_name": "dbt_snowflake", + "path": "macros/utils/array_construct.sql", + "original_file_path": "macros/utils/array_construct.sql", + "unique_id": "macro.dbt_snowflake.snowflake__array_construct", + "macro_sql": "{% macro snowflake__array_construct(inputs, data_type) -%}\n array_construct( {{ inputs|join(' , ') }} )\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + 
"meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.183622, + "supported_languages": null + }, + "macro.dbt.run_hooks": { + "name": "run_hooks", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/hooks.sql", + "original_file_path": "macros/materializations/hooks.sql", + "unique_id": "macro.dbt.run_hooks", + "macro_sql": "{% macro run_hooks(hooks, inside_transaction=True) %}\n {% for hook in hooks | selectattr('transaction', 'equalto', inside_transaction) %}\n {% if not inside_transaction and loop.first %}\n {% call statement(auto_begin=inside_transaction) %}\n commit;\n {% endcall %}\n {% endif %}\n {% set rendered = render(hook.get('sql')) | trim %}\n {% if (rendered | length) > 0 %}\n {% call statement(auto_begin=inside_transaction) %}\n {{ rendered }}\n {% endcall %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1849952, + "supported_languages": null + }, + "macro.dbt.make_hook_config": { + "name": "make_hook_config", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/hooks.sql", + "original_file_path": "macros/materializations/hooks.sql", + "unique_id": "macro.dbt.make_hook_config", + "macro_sql": "{% macro make_hook_config(sql, inside_transaction) %}\n {{ tojson({\"sql\": sql, \"transaction\": inside_transaction}) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.185222, + "supported_languages": null + }, + "macro.dbt.before_begin": { + "name": "before_begin", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/hooks.sql", + "original_file_path": "macros/materializations/hooks.sql", + "unique_id": "macro.dbt.before_begin", + "macro_sql": "{% macro before_begin(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.make_hook_config"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.18538, + "supported_languages": null + }, + "macro.dbt.in_transaction": { + "name": "in_transaction", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/hooks.sql", + "original_file_path": "macros/materializations/hooks.sql", + "unique_id": "macro.dbt.in_transaction", + "macro_sql": "{% macro in_transaction(sql) %}\n {{ make_hook_config(sql, inside_transaction=True) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.make_hook_config"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1855302, + "supported_languages": null + }, + "macro.dbt.after_commit": { + "name": "after_commit", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/hooks.sql", + "original_file_path": "macros/materializations/hooks.sql", + "unique_id": "macro.dbt.after_commit", + "macro_sql": "{% macro after_commit(sql) %}\n {{ make_hook_config(sql, inside_transaction=False) }}\n{% endmacro %}", + "depends_on": { "macros": 
["macro.dbt.make_hook_config"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.185684, + "supported_languages": null + }, + "macro.dbt.set_sql_header": { + "name": "set_sql_header", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/configs.sql", + "original_file_path": "macros/materializations/configs.sql", + "unique_id": "macro.dbt.set_sql_header", + "macro_sql": "{% macro set_sql_header(config) -%}\n {{ config.set('sql_header', caller()) }}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.186094, + "supported_languages": null + }, + "macro.dbt.should_full_refresh": { + "name": "should_full_refresh", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/configs.sql", + "original_file_path": "macros/materializations/configs.sql", + "unique_id": "macro.dbt.should_full_refresh", + "macro_sql": "{% macro should_full_refresh() %}\n {% set config_full_refresh = config.get('full_refresh') %}\n {% if config_full_refresh is none %}\n {% set config_full_refresh = flags.FULL_REFRESH %}\n {% endif %}\n {% do return(config_full_refresh) %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1864161, + "supported_languages": null + }, + "macro.dbt.should_store_failures": { + "name": "should_store_failures", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/configs.sql", + "original_file_path": "macros/materializations/configs.sql", + "unique_id": "macro.dbt.should_store_failures", + "macro_sql": "{% macro should_store_failures() %}\n {% set config_store_failures = config.get('store_failures') %}\n {% if config_store_failures is none %}\n {% set config_store_failures = flags.STORE_FAILURES %}\n {% endif %}\n {% do return(config_store_failures) %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1867409, + "supported_languages": null + }, + "macro.dbt.snapshot_merge_sql": { + "name": "snapshot_merge_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/snapshot_merge.sql", + "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", + "unique_id": "macro.dbt.snapshot_merge_sql", + "macro_sql": "{% macro snapshot_merge_sql(target, source, insert_cols) -%}\n {{ adapter.dispatch('snapshot_merge_sql', 'dbt')(target, source, insert_cols) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__snapshot_merge_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.187218, + "supported_languages": null + }, + "macro.dbt.default__snapshot_merge_sql": { + "name": "default__snapshot_merge_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/snapshot_merge.sql", + "original_file_path": "macros/materializations/snapshots/snapshot_merge.sql", + "unique_id": "macro.dbt.default__snapshot_merge_sql", + 
"macro_sql": "{% macro default__snapshot_merge_sql(target, source, insert_cols) -%}\n {%- set insert_cols_csv = insert_cols | join(', ') -%}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on DBT_INTERNAL_SOURCE.dbt_scd_id = DBT_INTERNAL_DEST.dbt_scd_id\n\n when matched\n and DBT_INTERNAL_DEST.dbt_valid_to is null\n and DBT_INTERNAL_SOURCE.dbt_change_type in ('update', 'delete')\n then update\n set dbt_valid_to = DBT_INTERNAL_SOURCE.dbt_valid_to\n\n when not matched\n and DBT_INTERNAL_SOURCE.dbt_change_type = 'insert'\n then insert ({{ insert_cols_csv }})\n values ({{ insert_cols_csv }})\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1875231, + "supported_languages": null + }, + "macro.dbt.strategy_dispatch": { + "name": "strategy_dispatch", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/strategies.sql", + "original_file_path": "macros/materializations/snapshots/strategies.sql", + "unique_id": "macro.dbt.strategy_dispatch", + "macro_sql": "{% macro strategy_dispatch(name) -%}\n{% set original_name = name %}\n {% if '.' in name %}\n {% set package_name, name = name.split(\".\", 1) %}\n {% else %}\n {% set package_name = none %}\n {% endif %}\n\n {% if package_name is none %}\n {% set package_context = context %}\n {% elif package_name in context %}\n {% set package_context = context[package_name] %}\n {% else %}\n {% set error_msg %}\n Could not find package '{{package_name}}', called with '{{original_name}}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n\n {%- set search_name = 'snapshot_' ~ name ~ '_strategy' -%}\n\n {% if search_name not in package_context %}\n {% set error_msg %}\n The specified strategy macro '{{name}}' was not found in package '{{ package_name }}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n {{ return(package_context[search_name]) }}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1914802, + "supported_languages": null + }, + "macro.dbt.snapshot_hash_arguments": { + "name": "snapshot_hash_arguments", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/strategies.sql", + "original_file_path": "macros/materializations/snapshots/strategies.sql", + "unique_id": "macro.dbt.snapshot_hash_arguments", + "macro_sql": "{% macro snapshot_hash_arguments(args) -%}\n {{ adapter.dispatch('snapshot_hash_arguments', 'dbt')(args) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__snapshot_hash_arguments"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1916769, + "supported_languages": null + }, + "macro.dbt.default__snapshot_hash_arguments": { + "name": "default__snapshot_hash_arguments", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/strategies.sql", + "original_file_path": "macros/materializations/snapshots/strategies.sql", + "unique_id": "macro.dbt.default__snapshot_hash_arguments", + "macro_sql": "{% macro default__snapshot_hash_arguments(args) -%}\n md5({%- for arg in 
args -%}\n coalesce(cast({{ arg }} as varchar ), '')\n {% if not loop.last %} || '|' || {% endif %}\n {%- endfor -%})\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1919181, + "supported_languages": null + }, + "macro.dbt.snapshot_timestamp_strategy": { + "name": "snapshot_timestamp_strategy", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/strategies.sql", + "original_file_path": "macros/materializations/snapshots/strategies.sql", + "unique_id": "macro.dbt.snapshot_timestamp_strategy", + "macro_sql": "{% macro snapshot_timestamp_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set primary_key = config['unique_key'] %}\n {% set updated_at = config['updated_at'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n\n {#/*\n The snapshot relation might not have an {{ updated_at }} value if the\n snapshot strategy is changed from `check` to `timestamp`. We\n should use a dbt-created column for the comparison in the snapshot\n table instead of assuming that the user-supplied {{ updated_at }}\n will be present in the historical data.\n\n See https://github.com/dbt-labs/dbt-core/issues/2350\n */ #}\n {% set row_changed_expr -%}\n ({{ snapshotted_rel }}.dbt_valid_from < {{ current_rel }}.{{ updated_at }})\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.snapshot_hash_arguments"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.1929312, + "supported_languages": null + }, + "macro.dbt.snapshot_string_as_time": { + "name": "snapshot_string_as_time", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/strategies.sql", + "original_file_path": "macros/materializations/snapshots/strategies.sql", + "unique_id": "macro.dbt.snapshot_string_as_time", + "macro_sql": "{% macro snapshot_string_as_time(timestamp) -%}\n {{ adapter.dispatch('snapshot_string_as_time', 'dbt')(timestamp) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__snapshot_string_as_time"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.193171, + "supported_languages": null + }, + "macro.dbt.default__snapshot_string_as_time": { + "name": "default__snapshot_string_as_time", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/strategies.sql", + "original_file_path": "macros/materializations/snapshots/strategies.sql", + "unique_id": "macro.dbt.default__snapshot_string_as_time", + "macro_sql": "{% macro default__snapshot_string_as_time(timestamp) %}\n {% do exceptions.raise_not_implemented(\n 'snapshot_string_as_time macro not implemented for adapter '+adapter.type()\n ) %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], 
+ "created_at": 1705588676.193372, + "supported_languages": null + }, + "macro.dbt.snapshot_check_all_get_existing_columns": { + "name": "snapshot_check_all_get_existing_columns", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/strategies.sql", + "original_file_path": "macros/materializations/snapshots/strategies.sql", + "unique_id": "macro.dbt.snapshot_check_all_get_existing_columns", + "macro_sql": "{% macro snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) -%}\n {%- if not target_exists -%}\n {#-- no table yet -> return whatever the query does --#}\n {{ return((false, query_columns)) }}\n {%- endif -%}\n\n {#-- handle any schema changes --#}\n {%- set target_relation = adapter.get_relation(database=node.database, schema=node.schema, identifier=node.alias) -%}\n\n {% if check_cols_config == 'all' %}\n {%- set query_columns = get_columns_in_query(node['compiled_code']) -%}\n\n {% elif check_cols_config is iterable and (check_cols_config | length) > 0 %}\n {#-- query for proper casing/quoting, to support comparison below --#}\n {%- set select_check_cols_from_target -%}\n {#-- N.B. The whitespace below is necessary to avoid edge case issue with comments --#}\n {#-- See: https://github.com/dbt-labs/dbt-core/issues/6781 --#}\n select {{ check_cols_config | join(', ') }} from (\n {{ node['compiled_code'] }}\n ) subq\n {%- endset -%}\n {% set query_columns = get_columns_in_query(select_check_cols_from_target) %}\n\n {% else %}\n {% do exceptions.raise_compiler_error(\"Invalid value for 'check_cols': \" ~ check_cols_config) %}\n {% endif %}\n\n {%- set existing_cols = adapter.get_columns_in_relation(target_relation) | map(attribute = 'name') | list -%}\n {%- set ns = namespace() -%} {#-- handle for-loop scoping with a namespace --#}\n {%- set ns.column_added = false -%}\n\n {%- set intersection = [] -%}\n {%- for col in query_columns -%}\n {%- if col in existing_cols -%}\n {%- do intersection.append(adapter.quote(col)) -%}\n {%- else -%}\n {% set ns.column_added = true %}\n {%- endif -%}\n {%- endfor -%}\n {{ return((ns.column_added, intersection)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_columns_in_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.194912, + "supported_languages": null + }, + "macro.dbt.snapshot_check_strategy": { + "name": "snapshot_check_strategy", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/strategies.sql", + "original_file_path": "macros/materializations/snapshots/strategies.sql", + "unique_id": "macro.dbt.snapshot_check_strategy", + "macro_sql": "{% macro snapshot_check_strategy(node, snapshotted_rel, current_rel, config, target_exists) %}\n {% set check_cols_config = config['check_cols'] %}\n {% set primary_key = config['unique_key'] %}\n {% set invalidate_hard_deletes = config.get('invalidate_hard_deletes', false) %}\n {% set updated_at = config.get('updated_at', snapshot_get_time()) %}\n\n {% set column_added = false %}\n\n {% set column_added, check_cols = snapshot_check_all_get_existing_columns(node, target_exists, check_cols_config) %}\n\n {%- set row_changed_expr -%}\n (\n {%- if column_added -%}\n {{ get_true_sql() }}\n {%- else -%}\n {%- for col in check_cols -%}\n {{ snapshotted_rel }}.{{ col }} != {{ current_rel }}.{{ col }}\n or\n (\n (({{ snapshotted_rel }}.{{ col }} is null) and not ({{ 
current_rel }}.{{ col }} is null))\n or\n ((not {{ snapshotted_rel }}.{{ col }} is null) and ({{ current_rel }}.{{ col }} is null))\n )\n {%- if not loop.last %} or {% endif -%}\n {%- endfor -%}\n {%- endif -%}\n )\n {%- endset %}\n\n {% set scd_id_expr = snapshot_hash_arguments([primary_key, updated_at]) %}\n\n {% do return({\n \"unique_key\": primary_key,\n \"updated_at\": updated_at,\n \"row_changed\": row_changed_expr,\n \"scd_id\": scd_id_expr,\n \"invalidate_hard_deletes\": invalidate_hard_deletes\n }) %}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.snapshot_get_time", + "macro.dbt.snapshot_check_all_get_existing_columns", + "macro.dbt.get_true_sql", + "macro.dbt.snapshot_hash_arguments" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.196316, + "supported_languages": null + }, + "macro.dbt.create_columns": { + "name": "create_columns", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.create_columns", + "macro_sql": "{% macro create_columns(relation, columns) %}\n {{ adapter.dispatch('create_columns', 'dbt')(relation, columns) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__create_columns"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.201, + "supported_languages": null + }, + "macro.dbt.default__create_columns": { + "name": "default__create_columns", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.default__create_columns", + "macro_sql": "{% macro default__create_columns(relation, columns) %}\n {% for column in columns %}\n {% call statement() %}\n alter table {{ relation }} add column \"{{ column.name }}\" {{ column.data_type }};\n {% endcall %}\n {% endfor %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.201317, + "supported_languages": null + }, + "macro.dbt.post_snapshot": { + "name": "post_snapshot", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.post_snapshot", + "macro_sql": "{% macro post_snapshot(staging_relation) %}\n {{ adapter.dispatch('post_snapshot', 'dbt')(staging_relation) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__post_snapshot"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.201498, + "supported_languages": null + }, + "macro.dbt.default__post_snapshot": { + "name": "default__post_snapshot", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.default__post_snapshot", + "macro_sql": "{% macro default__post_snapshot(staging_relation) %}\n {# no-op #}\n{% endmacro %}", + 
"depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2015948, + "supported_languages": null + }, + "macro.dbt.get_true_sql": { + "name": "get_true_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.get_true_sql", + "macro_sql": "{% macro get_true_sql() %}\n {{ adapter.dispatch('get_true_sql', 'dbt')() }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_true_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.201746, + "supported_languages": null + }, + "macro.dbt.default__get_true_sql": { + "name": "default__get_true_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.default__get_true_sql", + "macro_sql": "{% macro default__get_true_sql() %}\n {{ return('TRUE') }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.201865, + "supported_languages": null + }, + "macro.dbt.snapshot_staging_table": { + "name": "snapshot_staging_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.snapshot_staging_table", + "macro_sql": "{% macro snapshot_staging_table(strategy, source_sql, target_relation) -%}\n {{ adapter.dispatch('snapshot_staging_table', 'dbt')(strategy, source_sql, target_relation) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__snapshot_staging_table"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2020748, + "supported_languages": null + }, + "macro.dbt.default__snapshot_staging_table": { + "name": "default__snapshot_staging_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.default__snapshot_staging_table", + "macro_sql": "{% macro default__snapshot_staging_table(strategy, source_sql, target_relation) -%}\n\n with snapshot_query as (\n\n {{ source_sql }}\n\n ),\n\n snapshotted_data as (\n\n select *,\n {{ strategy.unique_key }} as dbt_unique_key\n\n from {{ target_relation }}\n where dbt_valid_to is null\n\n ),\n\n insertions_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to,\n {{ strategy.scd_id }} as dbt_scd_id\n\n from snapshot_query\n ),\n\n updates_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n {{ strategy.updated_at }} as dbt_valid_to\n\n from snapshot_query\n ),\n\n {%- 
if strategy.invalidate_hard_deletes %}\n\n deletes_source_data as (\n\n select\n *,\n {{ strategy.unique_key }} as dbt_unique_key\n from snapshot_query\n ),\n {% endif %}\n\n insertions as (\n\n select\n 'insert' as dbt_change_type,\n source_data.*\n\n from insertions_source_data as source_data\n left outer join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where snapshotted_data.dbt_unique_key is null\n or (\n snapshotted_data.dbt_unique_key is not null\n and (\n {{ strategy.row_changed }}\n )\n )\n\n ),\n\n updates as (\n\n select\n 'update' as dbt_change_type,\n source_data.*,\n snapshotted_data.dbt_scd_id\n\n from updates_source_data as source_data\n join snapshotted_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where (\n {{ strategy.row_changed }}\n )\n )\n\n {%- if strategy.invalidate_hard_deletes -%}\n ,\n\n deletes as (\n\n select\n 'delete' as dbt_change_type,\n source_data.*,\n {{ snapshot_get_time() }} as dbt_valid_from,\n {{ snapshot_get_time() }} as dbt_updated_at,\n {{ snapshot_get_time() }} as dbt_valid_to,\n snapshotted_data.dbt_scd_id\n\n from snapshotted_data\n left join deletes_source_data as source_data on snapshotted_data.dbt_unique_key = source_data.dbt_unique_key\n where source_data.dbt_unique_key is null\n )\n {%- endif %}\n\n select * from insertions\n union all\n select * from updates\n {%- if strategy.invalidate_hard_deletes %}\n union all\n select * from deletes\n {%- endif %}\n\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.snapshot_get_time"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.203191, + "supported_languages": null + }, + "macro.dbt.build_snapshot_table": { + "name": "build_snapshot_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.build_snapshot_table", + "macro_sql": "{% macro build_snapshot_table(strategy, sql) -%}\n {{ adapter.dispatch('build_snapshot_table', 'dbt')(strategy, sql) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__build_snapshot_table"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.203469, + "supported_languages": null + }, + "macro.dbt.default__build_snapshot_table": { + "name": "default__build_snapshot_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.default__build_snapshot_table", + "macro_sql": "{% macro default__build_snapshot_table(strategy, sql) %}\n\n select *,\n {{ strategy.scd_id }} as dbt_scd_id,\n {{ strategy.updated_at }} as dbt_updated_at,\n {{ strategy.updated_at }} as dbt_valid_from,\n nullif({{ strategy.updated_at }}, {{ strategy.updated_at }}) as dbt_valid_to\n from (\n {{ sql }}\n ) sbq\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.203766, + "supported_languages": null + }, + "macro.dbt.build_snapshot_staging_table": { + "name": "build_snapshot_staging_table", + "resource_type": "macro", + "package_name": "dbt", + 
"path": "macros/materializations/snapshots/helpers.sql", + "original_file_path": "macros/materializations/snapshots/helpers.sql", + "unique_id": "macro.dbt.build_snapshot_staging_table", + "macro_sql": "{% macro build_snapshot_staging_table(strategy, sql, target_relation) %}\n {% set temp_relation = make_temp_relation(target_relation) %}\n\n {% set select = snapshot_staging_table(strategy, sql, target_relation) %}\n\n {% call statement('build_snapshot_staging_relation') %}\n {{ create_table_as(True, temp_relation, select) }}\n {% endcall %}\n\n {% do return(temp_relation) %}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.make_temp_relation", + "macro.dbt.snapshot_staging_table", + "macro.dbt.statement", + "macro.dbt.create_table_as" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2042692, + "supported_languages": null + }, + "macro.dbt.materialization_snapshot_default": { + "name": "materialization_snapshot_default", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/snapshots/snapshot.sql", + "original_file_path": "macros/materializations/snapshots/snapshot.sql", + "unique_id": "macro.dbt.materialization_snapshot_default", + "macro_sql": "{% materialization snapshot, default %}\n {%- set config = model['config'] -%}\n\n {%- set target_table = model.get('alias', model.get('name')) -%}\n\n {%- set strategy_name = config.get('strategy') -%}\n {%- set unique_key = config.get('unique_key') %}\n -- grab current tables grants config for comparision later on\n {%- set grant_config = config.get('grants') -%}\n\n {% set target_relation_exists, target_relation = get_or_create_relation(\n database=model.database,\n schema=model.schema,\n identifier=target_table,\n type='table') -%}\n\n {%- if not target_relation.is_table -%}\n {% do exceptions.relation_wrong_type(target_relation, 'table') %}\n {%- endif -%}\n\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set strategy_macro = strategy_dispatch(strategy_name) %}\n {% set strategy = strategy_macro(model, \"snapshotted_data\", \"source_data\", config, target_relation_exists) %}\n\n {% if not target_relation_exists %}\n\n {% set build_sql = build_snapshot_table(strategy, model['compiled_code']) %}\n {% set final_sql = create_table_as(False, target_relation, build_sql) %}\n\n {% else %}\n\n {{ adapter.valid_snapshot_target(target_relation) }}\n\n {% set staging_table = build_snapshot_staging_table(strategy, sql, target_relation) %}\n\n -- this may no-op if the database does not require column expansion\n {% do adapter.expand_target_column_types(from_relation=staging_table,\n to_relation=target_relation) %}\n\n {% set missing_columns = adapter.get_missing_columns(staging_table, target_relation)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% do create_columns(target_relation, missing_columns) %}\n\n {% set source_columns = adapter.get_columns_in_relation(staging_table)\n | rejectattr('name', 'equalto', 'dbt_change_type')\n | rejectattr('name', 'equalto', 'DBT_CHANGE_TYPE')\n | rejectattr('name', 'equalto', 'dbt_unique_key')\n | rejectattr('name', 'equalto', 'DBT_UNIQUE_KEY')\n | list %}\n\n {% set quoted_source_columns = [] %}\n {% for column in 
source_columns %}\n {% do quoted_source_columns.append(adapter.quote(column.name)) %}\n {% endfor %}\n\n {% set final_sql = snapshot_merge_sql(\n target = target_relation,\n source = staging_table,\n insert_cols = quoted_source_columns\n )\n %}\n\n {% endif %}\n\n {% call statement('main') %}\n {{ final_sql }}\n {% endcall %}\n\n {% set should_revoke = should_revoke(target_relation_exists, full_refresh_mode=False) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if not target_relation_exists %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n {% if staging_table is defined %}\n {% do post_snapshot(staging_table) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt.get_or_create_relation", + "macro.dbt.run_hooks", + "macro.dbt.strategy_dispatch", + "macro.dbt.build_snapshot_table", + "macro.dbt.create_table_as", + "macro.dbt.build_snapshot_staging_table", + "macro.dbt.create_columns", + "macro.dbt.snapshot_merge_sql", + "macro.dbt.statement", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants", + "macro.dbt.persist_docs", + "macro.dbt.create_indexes", + "macro.dbt.post_snapshot" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2111301, + "supported_languages": ["sql"] + }, + "macro.dbt.materialization_test_default": { + "name": "materialization_test_default", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/tests/test.sql", + "original_file_path": "macros/materializations/tests/test.sql", + "unique_id": "macro.dbt.materialization_test_default", + "macro_sql": "{%- materialization test, default -%}\n\n {% set relations = [] %}\n\n {% if should_store_failures() %}\n\n {% set identifier = model['alias'] %}\n {% set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n {% set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database, type='table') -%} %}\n\n {% if old_relation %}\n {% do adapter.drop_relation(old_relation) %}\n {% endif %}\n\n {% call statement(auto_begin=True) %}\n {{ create_table_as(False, target_relation, sql) }}\n {% endcall %}\n\n {% do relations.append(target_relation) %}\n\n {% set main_sql %}\n select *\n from {{ target_relation }}\n {% endset %}\n\n {{ adapter.commit() }}\n\n {% else %}\n\n {% set main_sql = sql %}\n\n {% endif %}\n\n {% set limit = config.get('limit') %}\n {% set fail_calc = config.get('fail_calc') %}\n {% set warn_if = config.get('warn_if') %}\n {% set error_if = config.get('error_if') %}\n\n {% call statement('main', fetch_result=True) -%}\n\n {{ get_test_sql(main_sql, fail_calc, warn_if, error_if, limit)}}\n\n {%- endcall %}\n\n {{ return({'relations': relations}) }}\n\n{%- endmaterialization -%}", + "depends_on": { + "macros": [ + "macro.dbt.should_store_failures", + "macro.dbt.statement", + "macro.dbt.create_table_as", + "macro.dbt.get_test_sql" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.213342, + "supported_languages": ["sql"] + }, + "macro.dbt.get_test_sql": { + 
"name": "get_test_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/tests/helpers.sql", + "original_file_path": "macros/materializations/tests/helpers.sql", + "unique_id": "macro.dbt.get_test_sql", + "macro_sql": "{% macro get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n {{ adapter.dispatch('get_test_sql', 'dbt')(main_sql, fail_calc, warn_if, error_if, limit) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_test_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.213866, + "supported_languages": null + }, + "macro.dbt.default__get_test_sql": { + "name": "default__get_test_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/tests/helpers.sql", + "original_file_path": "macros/materializations/tests/helpers.sql", + "unique_id": "macro.dbt.default__get_test_sql", + "macro_sql": "{% macro default__get_test_sql(main_sql, fail_calc, warn_if, error_if, limit) -%}\n select\n {{ fail_calc }} as failures,\n {{ fail_calc }} {{ warn_if }} as should_warn,\n {{ fail_calc }} {{ error_if }} as should_error\n from (\n {{ main_sql }}\n {{ \"limit \" ~ limit if limit != none }}\n ) dbt_internal_test\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2142148, + "supported_languages": null + }, + "macro.dbt.get_where_subquery": { + "name": "get_where_subquery", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/tests/where_subquery.sql", + "original_file_path": "macros/materializations/tests/where_subquery.sql", + "unique_id": "macro.dbt.get_where_subquery", + "macro_sql": "{% macro get_where_subquery(relation) -%}\n {% do return(adapter.dispatch('get_where_subquery', 'dbt')(relation)) %}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_where_subquery"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.214627, + "supported_languages": null + }, + "macro.dbt.default__get_where_subquery": { + "name": "default__get_where_subquery", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/tests/where_subquery.sql", + "original_file_path": "macros/materializations/tests/where_subquery.sql", + "unique_id": "macro.dbt.default__get_where_subquery", + "macro_sql": "{% macro default__get_where_subquery(relation) -%}\n {% set where = config.get('where', '') %}\n {% if where %}\n {%- set filtered -%}\n (select * from {{ relation }} where {{ where }}) dbt_subquery\n {%- endset -%}\n {% do return(filtered) %}\n {%- else -%}\n {% do return(relation) %}\n {%- endif -%}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.215032, + "supported_languages": null + }, + "macro.dbt.get_quoted_csv": { + "name": "get_quoted_csv", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/column_helpers.sql", + "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", + "unique_id": "macro.dbt.get_quoted_csv", + "macro_sql": "{% macro 
get_quoted_csv(column_names) %}\n\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote(col)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.216692, + "supported_languages": null + }, + "macro.dbt.diff_columns": { + "name": "diff_columns", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/column_helpers.sql", + "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", + "unique_id": "macro.dbt.diff_columns", + "macro_sql": "{% macro diff_columns(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% set source_names = source_columns | map(attribute = 'column') | list %}\n {% set target_names = target_columns | map(attribute = 'column') | list %}\n\n {# --check whether the name attribute exists in the target - this does not perform a data type check #}\n {% for sc in source_columns %}\n {% if sc.name not in target_names %}\n {{ result.append(sc) }}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.217275, + "supported_languages": null + }, + "macro.dbt.diff_column_data_types": { + "name": "diff_column_data_types", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/column_helpers.sql", + "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", + "unique_id": "macro.dbt.diff_column_data_types", + "macro_sql": "{% macro diff_column_data_types(source_columns, target_columns) %}\n\n {% set result = [] %}\n {% for sc in source_columns %}\n {% set tc = target_columns | selectattr(\"name\", \"equalto\", sc.name) | list | first %}\n {% if tc %}\n {% if sc.data_type != tc.data_type and not sc.can_expand_to(other_column=tc) %}\n {{ result.append( { 'column_name': tc.name, 'new_type': sc.data_type } ) }}\n {% endif %}\n {% endif %}\n {% endfor %}\n\n {{ return(result) }}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.217984, + "supported_languages": null + }, + "macro.dbt.get_merge_update_columns": { + "name": "get_merge_update_columns", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/column_helpers.sql", + "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", + "unique_id": "macro.dbt.get_merge_update_columns", + "macro_sql": "{% macro get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {{ return(adapter.dispatch('get_merge_update_columns', 'dbt')(merge_update_columns, merge_exclude_columns, dest_columns)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__get_merge_update_columns"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.218246, + "supported_languages": null + }, + 
"macro.dbt.default__get_merge_update_columns": { + "name": "default__get_merge_update_columns", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/column_helpers.sql", + "original_file_path": "macros/materializations/models/incremental/column_helpers.sql", + "unique_id": "macro.dbt.default__get_merge_update_columns", + "macro_sql": "{% macro default__get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) %}\n {%- set default_cols = dest_columns | map(attribute=\"quoted\") | list -%}\n\n {%- if merge_update_columns and merge_exclude_columns -%}\n {{ exceptions.raise_compiler_error(\n 'Model cannot specify merge_update_columns and merge_exclude_columns. Please update model to use only one config'\n )}}\n {%- elif merge_update_columns -%}\n {%- set update_columns = merge_update_columns -%}\n {%- elif merge_exclude_columns -%}\n {%- set update_columns = [] -%}\n {%- for column in dest_columns -%}\n {% if column.column | lower not in merge_exclude_columns | map(\"lower\") | list %}\n {%- do update_columns.append(column.quoted) -%}\n {% endif %}\n {%- endfor -%}\n {%- else -%}\n {%- set update_columns = default_cols -%}\n {%- endif -%}\n\n {{ return(update_columns) }}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.21903, + "supported_languages": null + }, + "macro.dbt.get_merge_sql": { + "name": "get_merge_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/merge.sql", + "original_file_path": "macros/materializations/models/incremental/merge.sql", + "unique_id": "macro.dbt.get_merge_sql", + "macro_sql": "{% macro get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%}\n -- back compat for old kwarg name\n {% set incremental_predicates = kwargs.get('predicates', incremental_predicates) %}\n {{ adapter.dispatch('get_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__get_merge_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.225936, + "supported_languages": null + }, + "macro.dbt.default__get_merge_sql": { + "name": "default__get_merge_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/merge.sql", + "original_file_path": "macros/materializations/models/incremental/merge.sql", + "unique_id": "macro.dbt.default__get_merge_sql", + "macro_sql": "{% macro default__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%}\n {%- set predicates = [] if incremental_predicates is none else [] + incremental_predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set merge_update_columns = config.get('merge_update_columns') -%}\n {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%}\n {%- set update_columns = get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not mapping and unique_key is not string %}\n {% for key in unique_key 
%}\n {% set this_key_match %}\n DBT_INTERNAL_SOURCE.{{ key }} = DBT_INTERNAL_DEST.{{ key }}\n {% endset %}\n {% do predicates.append(this_key_match) %}\n {% endfor %}\n {% else %}\n {% set unique_key_match %}\n DBT_INTERNAL_SOURCE.{{ unique_key }} = DBT_INTERNAL_DEST.{{ unique_key }}\n {% endset %}\n {% do predicates.append(unique_key_match) %}\n {% endif %}\n {% else %}\n {% do predicates.append('FALSE') %}\n {% endif %}\n\n {{ sql_header if sql_header is not none }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on {{\"(\" ~ predicates | join(\") and (\") ~ \")\"}}\n\n {% if unique_key %}\n when matched then update set\n {% for column_name in update_columns -%}\n {{ column_name }} = DBT_INTERNAL_SOURCE.{{ column_name }}\n {%- if not loop.last %}, {%- endif %}\n {%- endfor %}\n {% endif %}\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.get_quoted_csv", + "macro.dbt.get_merge_update_columns" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2277288, + "supported_languages": null + }, + "macro.dbt.get_delete_insert_merge_sql": { + "name": "get_delete_insert_merge_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/merge.sql", + "original_file_path": "macros/materializations/models/incremental/merge.sql", + "unique_id": "macro.dbt.get_delete_insert_merge_sql", + "macro_sql": "{% macro get_delete_insert_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%}\n {{ adapter.dispatch('get_delete_insert_merge_sql', 'dbt')(target, source, unique_key, dest_columns, incremental_predicates) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__get_delete_insert_merge_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.228014, + "supported_languages": null + }, + "macro.dbt.default__get_delete_insert_merge_sql": { + "name": "default__get_delete_insert_merge_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/merge.sql", + "original_file_path": "macros/materializations/models/incremental/merge.sql", + "unique_id": "macro.dbt.default__get_delete_insert_merge_sql", + "macro_sql": "{% macro default__get_delete_insert_merge_sql(target, source, unique_key, dest_columns, incremental_predicates) -%}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n {% if unique_key %}\n {% if unique_key is sequence and unique_key is not string %}\n delete from {{target }}\n using {{ source }}\n where (\n {% for key in unique_key %}\n {{ source }}.{{ key }} = {{ target }}.{{ key }}\n {{ \"and \" if not loop.last}}\n {% endfor %}\n {% if incremental_predicates %}\n {% for predicate in incremental_predicates %}\n and {{ predicate }}\n {% endfor %}\n {% endif %}\n );\n {% else %}\n delete from {{ target }}\n where (\n {{ unique_key }}) in (\n select ({{ unique_key }})\n from {{ source }}\n )\n {%- if incremental_predicates %}\n {% for predicate in incremental_predicates %}\n and {{ predicate }}\n {% endfor %}\n {%- endif -%};\n\n {% endif %}\n {% endif %}\n\n insert into {{ target }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ 
source }}\n )\n\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_quoted_csv"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2290711, + "supported_languages": null + }, + "macro.dbt.get_insert_overwrite_merge_sql": { + "name": "get_insert_overwrite_merge_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/merge.sql", + "original_file_path": "macros/materializations/models/incremental/merge.sql", + "unique_id": "macro.dbt.get_insert_overwrite_merge_sql", + "macro_sql": "{% macro get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header=false) -%}\n {{ adapter.dispatch('get_insert_overwrite_merge_sql', 'dbt')(target, source, dest_columns, predicates, include_sql_header) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__get_insert_overwrite_merge_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.229374, + "supported_languages": null + }, + "macro.dbt.default__get_insert_overwrite_merge_sql": { + "name": "default__get_insert_overwrite_merge_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/merge.sql", + "original_file_path": "macros/materializations/models/incremental/merge.sql", + "unique_id": "macro.dbt.default__get_insert_overwrite_merge_sql", + "macro_sql": "{% macro default__get_insert_overwrite_merge_sql(target, source, dest_columns, predicates, include_sql_header) -%}\n {#-- The only time include_sql_header is True: --#}\n {#-- BigQuery + insert_overwrite strategy + \"static\" partitions config --#}\n {#-- We should consider including the sql header at the materialization level instead --#}\n\n {%- set predicates = [] if predicates is none else [] + predicates -%}\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none and include_sql_header }}\n\n merge into {{ target }} as DBT_INTERNAL_DEST\n using {{ source }} as DBT_INTERNAL_SOURCE\n on FALSE\n\n when not matched by source\n {% if predicates %} and {{ predicates | join(' and ') }} {% endif %}\n then delete\n\n when not matched then insert\n ({{ dest_cols_csv }})\n values\n ({{ dest_cols_csv }})\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_quoted_csv"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.230069, + "supported_languages": null + }, + "macro.dbt.is_incremental": { + "name": "is_incremental", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/is_incremental.sql", + "original_file_path": "macros/materializations/models/incremental/is_incremental.sql", + "unique_id": "macro.dbt.is_incremental", + "macro_sql": "{% macro is_incremental() %}\n {#-- do not run introspective queries in parsing #}\n {% if not execute %}\n {{ return(False) }}\n {% else %}\n {% set relation = adapter.get_relation(this.database, this.schema, this.table) %}\n {{ return(relation is not none\n and relation.type == 'table'\n and model.config.materialized == 'incremental'\n and not should_full_refresh()) }}\n {% endif %}\n{% endmacro %}", + 
"depends_on": { "macros": ["macro.dbt.should_full_refresh"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2309601, + "supported_languages": null + }, + "macro.dbt.get_incremental_append_sql": { + "name": "get_incremental_append_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.get_incremental_append_sql", + "macro_sql": "{% macro get_incremental_append_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_append_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__get_incremental_append_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.232094, + "supported_languages": null + }, + "macro.dbt.default__get_incremental_append_sql": { + "name": "default__get_incremental_append_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.default__get_incremental_append_sql", + "macro_sql": "{% macro default__get_incremental_append_sql(arg_dict) %}\n\n {% do return(get_insert_into_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"])) %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_insert_into_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.232426, + "supported_languages": null + }, + "macro.dbt.get_incremental_delete_insert_sql": { + "name": "get_incremental_delete_insert_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.get_incremental_delete_insert_sql", + "macro_sql": "{% macro get_incremental_delete_insert_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_delete_insert_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__get_incremental_delete_insert_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2327669, + "supported_languages": null + }, + "macro.dbt.default__get_incremental_delete_insert_sql": { + "name": "default__get_incremental_delete_insert_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.default__get_incremental_delete_insert_sql", + "macro_sql": "{% macro default__get_incremental_delete_insert_sql(arg_dict) %}\n\n {% do return(get_delete_insert_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"], arg_dict[\"incremental_predicates\"])) %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_delete_insert_merge_sql"] }, + "description": "", + "meta": {}, + 
"docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.233175, + "supported_languages": null + }, + "macro.dbt.get_incremental_merge_sql": { + "name": "get_incremental_merge_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.get_incremental_merge_sql", + "macro_sql": "{% macro get_incremental_merge_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_merge_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__get_incremental_merge_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.233388, + "supported_languages": null + }, + "macro.dbt.default__get_incremental_merge_sql": { + "name": "default__get_incremental_merge_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.default__get_incremental_merge_sql", + "macro_sql": "{% macro default__get_incremental_merge_sql(arg_dict) %}\n\n {% do return(get_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"unique_key\"], arg_dict[\"dest_columns\"], arg_dict[\"incremental_predicates\"])) %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_merge_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2336981, + "supported_languages": null + }, + "macro.dbt.get_incremental_insert_overwrite_sql": { + "name": "get_incremental_insert_overwrite_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.get_incremental_insert_overwrite_sql", + "macro_sql": "{% macro get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_insert_overwrite_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__get_incremental_insert_overwrite_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2338939, + "supported_languages": null + }, + "macro.dbt.default__get_incremental_insert_overwrite_sql": { + "name": "default__get_incremental_insert_overwrite_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.default__get_incremental_insert_overwrite_sql", + "macro_sql": "{% macro default__get_incremental_insert_overwrite_sql(arg_dict) %}\n\n {% do return(get_insert_overwrite_merge_sql(arg_dict[\"target_relation\"], arg_dict[\"temp_relation\"], arg_dict[\"dest_columns\"], arg_dict[\"incremental_predicates\"])) %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_insert_overwrite_merge_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + 
"patch_path": null, + "arguments": [], + "created_at": 1705588676.2341719, + "supported_languages": null + }, + "macro.dbt.get_incremental_default_sql": { + "name": "get_incremental_default_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.get_incremental_default_sql", + "macro_sql": "{% macro get_incremental_default_sql(arg_dict) %}\n\n {{ return(adapter.dispatch('get_incremental_default_sql', 'dbt')(arg_dict)) }}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__get_incremental_default_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.234366, + "supported_languages": null + }, + "macro.dbt.default__get_incremental_default_sql": { + "name": "default__get_incremental_default_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.default__get_incremental_default_sql", + "macro_sql": "{% macro default__get_incremental_default_sql(arg_dict) %}\n\n {% do return(get_incremental_append_sql(arg_dict)) %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_incremental_append_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.234522, + "supported_languages": null + }, + "macro.dbt.get_insert_into_sql": { + "name": "get_insert_into_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/strategies.sql", + "original_file_path": "macros/materializations/models/incremental/strategies.sql", + "unique_id": "macro.dbt.get_insert_into_sql", + "macro_sql": "{% macro get_insert_into_sql(target_relation, temp_relation, dest_columns) %}\n\n {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute=\"name\")) -%}\n\n insert into {{ target_relation }} ({{ dest_cols_csv }})\n (\n select {{ dest_cols_csv }}\n from {{ temp_relation }}\n )\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_quoted_csv"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.234826, + "supported_languages": null + }, + "macro.dbt.materialization_incremental_default": { + "name": "materialization_incremental_default", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/incremental.sql", + "original_file_path": "macros/materializations/models/incremental/incremental.sql", + "unique_id": "macro.dbt.materialization_incremental_default", + "macro_sql": "{% materialization incremental, default -%}\n\n -- relations\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') -%}\n {%- set temp_relation = make_temp_relation(target_relation)-%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation)-%}\n {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n\n -- 
configs\n {%- set unique_key = config.get('unique_key') -%}\n {%- set full_refresh_mode = (should_full_refresh() or existing_relation.is_view) -%}\n {%- set on_schema_change = incremental_validate_on_schema_change(config.get('on_schema_change'), default='ignore') -%}\n\n -- the temp_ and backup_ relations should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation. This has to happen before\n -- BEGIN, in a separate transaction\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation)-%}\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set to_drop = [] %}\n\n {% if existing_relation is none %}\n {% set build_sql = get_create_table_as_sql(False, target_relation, sql) %}\n {% elif full_refresh_mode %}\n {% set build_sql = get_create_table_as_sql(False, intermediate_relation, sql) %}\n {% set need_swap = true %}\n {% else %}\n {% do run_query(get_create_table_as_sql(True, temp_relation, sql)) %}\n {% do adapter.expand_target_column_types(\n from_relation=temp_relation,\n to_relation=target_relation) %}\n {#-- Process schema changes. Returns dict of changes if successful. Use source columns for upserting/merging --#}\n {% set dest_columns = process_schema_changes(on_schema_change, temp_relation, existing_relation) %}\n {% if not dest_columns %}\n {% set dest_columns = adapter.get_columns_in_relation(existing_relation) %}\n {% endif %}\n\n {#-- Get the incremental_strategy, the macro to use for the strategy, and build the sql --#}\n {% set incremental_strategy = config.get('incremental_strategy') or 'default' %}\n {% set incremental_predicates = config.get('predicates', none) or config.get('incremental_predicates', none) %}\n {% set strategy_sql_macro_func = adapter.get_incremental_strategy_macro(context, incremental_strategy) %}\n {% set strategy_arg_dict = ({'target_relation': target_relation, 'temp_relation': temp_relation, 'unique_key': unique_key, 'dest_columns': dest_columns, 'incremental_predicates': incremental_predicates }) %}\n {% set build_sql = strategy_sql_macro_func(strategy_arg_dict) %}\n\n {% endif %}\n\n {% call statement(\"main\") %}\n {{ build_sql }}\n {% endcall %}\n\n {% if need_swap %}\n {% do adapter.rename_relation(target_relation, backup_relation) %}\n {% do adapter.rename_relation(intermediate_relation, target_relation) %}\n {% do to_drop.append(backup_relation) %}\n {% endif %}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if existing_relation is none or existing_relation.is_view or should_full_refresh() %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n -- `COMMIT` happens here\n {% do adapter.commit() %}\n\n {% for rel in to_drop %}\n {% do adapter.drop_relation(rel) %}\n {% endfor %}\n\n {{ run_hooks(post_hooks, 
inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt.load_cached_relation", + "macro.dbt.make_temp_relation", + "macro.dbt.make_intermediate_relation", + "macro.dbt.make_backup_relation", + "macro.dbt.should_full_refresh", + "macro.dbt.incremental_validate_on_schema_change", + "macro.dbt.drop_relation_if_exists", + "macro.dbt.run_hooks", + "macro.dbt.get_create_table_as_sql", + "macro.dbt.run_query", + "macro.dbt.process_schema_changes", + "macro.dbt.statement", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants", + "macro.dbt.persist_docs", + "macro.dbt.create_indexes" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2400708, + "supported_languages": ["sql"] + }, + "macro.dbt.incremental_validate_on_schema_change": { + "name": "incremental_validate_on_schema_change", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/on_schema_change.sql", + "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", + "unique_id": "macro.dbt.incremental_validate_on_schema_change", + "macro_sql": "{% macro incremental_validate_on_schema_change(on_schema_change, default='ignore') %}\n\n {% if on_schema_change not in ['sync_all_columns', 'append_new_columns', 'fail', 'ignore'] %}\n\n {% set log_message = 'Invalid value for on_schema_change (%s) specified. Setting default value of %s.' % (on_schema_change, default) %}\n {% do log(log_message) %}\n\n {{ return(default) }}\n\n {% else %}\n\n {{ return(on_schema_change) }}\n\n {% endif %}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.245894, + "supported_languages": null + }, + "macro.dbt.check_for_schema_changes": { + "name": "check_for_schema_changes", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/on_schema_change.sql", + "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", + "unique_id": "macro.dbt.check_for_schema_changes", + "macro_sql": "{% macro check_for_schema_changes(source_relation, target_relation) %}\n\n {% set schema_changed = False %}\n\n {%- set source_columns = adapter.get_columns_in_relation(source_relation) -%}\n {%- set target_columns = adapter.get_columns_in_relation(target_relation) -%}\n {%- set source_not_in_target = diff_columns(source_columns, target_columns) -%}\n {%- set target_not_in_source = diff_columns(target_columns, source_columns) -%}\n\n {% set new_target_types = diff_column_data_types(source_columns, target_columns) %}\n\n {% if source_not_in_target != [] %}\n {% set schema_changed = True %}\n {% elif target_not_in_source != [] or new_target_types != [] %}\n {% set schema_changed = True %}\n {% elif new_target_types != [] %}\n {% set schema_changed = True %}\n {% endif %}\n\n {% set changes_dict = {\n 'schema_changed': schema_changed,\n 'source_not_in_target': source_not_in_target,\n 'target_not_in_source': target_not_in_source,\n 'source_columns': source_columns,\n 'target_columns': target_columns,\n 'new_target_types': new_target_types\n } %}\n\n {% set msg %}\n In {{ target_relation }}:\n Schema changed: {{ schema_changed }}\n Source columns not in target: {{ 
source_not_in_target }}\n Target columns not in source: {{ target_not_in_source }}\n New column types: {{ new_target_types }}\n {% endset %}\n\n {% do log(msg) %}\n\n {{ return(changes_dict) }}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.diff_columns", "macro.dbt.diff_column_data_types"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.247189, + "supported_languages": null + }, + "macro.dbt.sync_column_schemas": { + "name": "sync_column_schemas", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/on_schema_change.sql", + "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", + "unique_id": "macro.dbt.sync_column_schemas", + "macro_sql": "{% macro sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n {%- set add_to_target_arr = schema_changes_dict['source_not_in_target'] -%}\n\n {%- if on_schema_change == 'append_new_columns'-%}\n {%- if add_to_target_arr | length > 0 -%}\n {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, none) -%}\n {%- endif -%}\n\n {% elif on_schema_change == 'sync_all_columns' %}\n {%- set remove_from_target_arr = schema_changes_dict['target_not_in_source'] -%}\n {%- set new_target_types = schema_changes_dict['new_target_types'] -%}\n\n {% if add_to_target_arr | length > 0 or remove_from_target_arr | length > 0 %}\n {%- do alter_relation_add_remove_columns(target_relation, add_to_target_arr, remove_from_target_arr) -%}\n {% endif %}\n\n {% if new_target_types != [] %}\n {% for ntt in new_target_types %}\n {% set column_name = ntt['column_name'] %}\n {% set new_type = ntt['new_type'] %}\n {% do alter_column_type(target_relation, column_name, new_type) %}\n {% endfor %}\n {% endif %}\n\n {% endif %}\n\n {% set schema_change_message %}\n In {{ target_relation }}:\n Schema change approach: {{ on_schema_change }}\n Columns added: {{ add_to_target_arr }}\n Columns removed: {{ remove_from_target_arr }}\n Data types changed: {{ new_target_types }}\n {% endset %}\n\n {% do log(schema_change_message) %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.alter_relation_add_remove_columns", + "macro.dbt.alter_column_type" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2487931, + "supported_languages": null + }, + "macro.dbt.process_schema_changes": { + "name": "process_schema_changes", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/incremental/on_schema_change.sql", + "original_file_path": "macros/materializations/models/incremental/on_schema_change.sql", + "unique_id": "macro.dbt.process_schema_changes", + "macro_sql": "{% macro process_schema_changes(on_schema_change, source_relation, target_relation) %}\n\n {% if on_schema_change == 'ignore' %}\n\n {{ return({}) }}\n\n {% else %}\n\n {% set schema_changes_dict = check_for_schema_changes(source_relation, target_relation) %}\n\n {% if schema_changes_dict['schema_changed'] %}\n\n {% if on_schema_change == 'fail' %}\n\n {% set fail_msg %}\n The source and target schemas on this incremental model are out of sync!\n They can be reconciled in several ways:\n - set the `on_schema_change` config to either append_new_columns or sync_all_columns, depending on your situation.\n - Re-run the incremental 
model with `full_refresh: True` to update the target schema.\n - update the schema manually and re-run the process.\n\n Additional troubleshooting context:\n Source columns not in target: {{ schema_changes_dict['source_not_in_target'] }}\n Target columns not in source: {{ schema_changes_dict['target_not_in_source'] }}\n New column types: {{ schema_changes_dict['new_target_types'] }}\n {% endset %}\n\n {% do exceptions.raise_compiler_error(fail_msg) %}\n\n {# -- unless we ignore, run the sync operation per the config #}\n {% else %}\n\n {% do sync_column_schemas(on_schema_change, target_relation, schema_changes_dict) %}\n\n {% endif %}\n\n {% endif %}\n\n {{ return(schema_changes_dict['source_columns']) }}\n\n {% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.check_for_schema_changes", + "macro.dbt.sync_column_schemas" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2500448, + "supported_languages": null + }, + "macro.dbt.materialization_materialized_view_default": { + "name": "materialization_materialized_view_default", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/materialized_view.sql", + "unique_id": "macro.dbt.materialization_materialized_view_default", + "macro_sql": "{% materialization materialized_view, default %}\n {% set existing_relation = load_cached_relation(this) %}\n {% set target_relation = this.incorporate(type=this.MaterializedView) %}\n {% set intermediate_relation = make_intermediate_relation(target_relation) %}\n {% set backup_relation_type = target_relation.MaterializedView if existing_relation is none else existing_relation.type %}\n {% set backup_relation = make_backup_relation(target_relation, backup_relation_type) %}\n\n {{ materialized_view_setup(backup_relation, intermediate_relation, pre_hooks) }}\n\n {% set build_sql = materialized_view_get_build_sql(existing_relation, target_relation, backup_relation, intermediate_relation) %}\n\n {% if build_sql == '' %}\n {{ materialized_view_execute_no_op(target_relation) }}\n {% else %}\n {{ materialized_view_execute_build_sql(build_sql, existing_relation, target_relation, post_hooks) }}\n {% endif %}\n\n {{ materialized_view_teardown(backup_relation, intermediate_relation, post_hooks) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt.load_cached_relation", + "macro.dbt.make_intermediate_relation", + "macro.dbt.make_backup_relation", + "macro.dbt.materialized_view_setup", + "macro.dbt.materialized_view_get_build_sql", + "macro.dbt.materialized_view_execute_no_op", + "macro.dbt.materialized_view_execute_build_sql", + "macro.dbt.materialized_view_teardown" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2555, + "supported_languages": ["sql"] + }, + "macro.dbt.materialized_view_setup": { + "name": "materialized_view_setup", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/materialized_view.sql", + "unique_id": "macro.dbt.materialized_view_setup", + "macro_sql": "{% macro 
materialized_view_setup(backup_relation, intermediate_relation, pre_hooks) %}\n\n -- backup_relation and intermediate_relation should not already exist in the database\n -- it's possible these exist because of a previous run that exited unexpectedly\n {% set preexisting_backup_relation = load_cached_relation(backup_relation) %}\n {% set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) %}\n\n -- drop the temp relations if they exist already in the database\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.load_cached_relation", + "macro.dbt.drop_relation_if_exists", + "macro.dbt.run_hooks" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2559412, + "supported_languages": null + }, + "macro.dbt.materialized_view_teardown": { + "name": "materialized_view_teardown", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/materialized_view.sql", + "unique_id": "macro.dbt.materialized_view_teardown", + "macro_sql": "{% macro materialized_view_teardown(backup_relation, intermediate_relation, post_hooks) %}\n\n -- drop the temp relations if they exist to leave the database clean for the next run\n {{ drop_relation_if_exists(backup_relation) }}\n {{ drop_relation_if_exists(intermediate_relation) }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.drop_relation_if_exists", "macro.dbt.run_hooks"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.256222, + "supported_languages": null + }, + "macro.dbt.materialized_view_get_build_sql": { + "name": "materialized_view_get_build_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/materialized_view.sql", + "unique_id": "macro.dbt.materialized_view_get_build_sql", + "macro_sql": "{% macro materialized_view_get_build_sql(existing_relation, target_relation, backup_relation, intermediate_relation) %}\n\n {% set full_refresh_mode = should_full_refresh() %}\n\n -- determine the scenario we're in: create, full_refresh, alter, refresh data\n {% if existing_relation is none %}\n {% set build_sql = get_create_materialized_view_as_sql(target_relation, sql) %}\n {% elif full_refresh_mode or not existing_relation.is_materialized_view %}\n {% set build_sql = get_replace_materialized_view_as_sql(target_relation, sql, existing_relation, backup_relation, intermediate_relation) %}\n {% else %}\n\n -- get config options\n {% set on_configuration_change = config.get('on_configuration_change') %}\n {% set configuration_changes = get_materialized_view_configuration_changes(existing_relation, config) %}\n\n {% if configuration_changes is none %}\n {% set build_sql = refresh_materialized_view(target_relation) %}\n\n {% elif on_configuration_change == 'apply' %}\n {% set build_sql = get_alter_materialized_view_as_sql(target_relation, configuration_changes, 
sql, existing_relation, backup_relation, intermediate_relation) %}\n {% elif on_configuration_change == 'continue' %}\n {% set build_sql = '' %}\n {{ exceptions.warn(\"Configuration changes were identified and `on_configuration_change` was set to `continue` for `\" ~ target_relation ~ \"`\") }}\n {% elif on_configuration_change == 'fail' %}\n {{ exceptions.raise_fail_fast_error(\"Configuration changes were identified and `on_configuration_change` was set to `fail` for `\" ~ target_relation ~ \"`\") }}\n\n {% else %}\n -- this only happens if the user provides a value other than `apply`, 'skip', 'fail'\n {{ exceptions.raise_compiler_error(\"Unexpected configuration scenario\") }}\n\n {% endif %}\n\n {% endif %}\n\n {% do return(build_sql) %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.should_full_refresh", + "macro.dbt.get_create_materialized_view_as_sql", + "macro.dbt.get_replace_materialized_view_as_sql", + "macro.dbt.get_materialized_view_configuration_changes", + "macro.dbt.refresh_materialized_view", + "macro.dbt.get_alter_materialized_view_as_sql" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.257633, + "supported_languages": null + }, + "macro.dbt.materialized_view_execute_no_op": { + "name": "materialized_view_execute_no_op", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/materialized_view.sql", + "unique_id": "macro.dbt.materialized_view_execute_no_op", + "macro_sql": "{% macro materialized_view_execute_no_op(target_relation) %}\n {% do store_raw_result(\n name=\"main\",\n message=\"skip \" ~ target_relation,\n code=\"skip\",\n rows_affected=\"-1\"\n ) %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2581272, + "supported_languages": null + }, + "macro.dbt.materialized_view_execute_build_sql": { + "name": "materialized_view_execute_build_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/materialized_view.sql", + "unique_id": "macro.dbt.materialized_view_execute_build_sql", + "macro_sql": "{% macro materialized_view_execute_build_sql(build_sql, existing_relation, target_relation, post_hooks) %}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n {% set grant_config = config.get('grants') %}\n\n {% call statement(name=\"main\") %}\n {{ build_sql }}\n {% endcall %}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.run_hooks", + "macro.dbt.statement", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants", + "macro.dbt.persist_docs" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.258873, + "supported_languages": null 
+ }, + "macro.dbt.get_materialized_view_configuration_changes": { + "name": "get_materialized_view_configuration_changes", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/get_materialized_view_configuration_changes.sql", + "original_file_path": "macros/materializations/models/materialized_view/get_materialized_view_configuration_changes.sql", + "unique_id": "macro.dbt.get_materialized_view_configuration_changes", + "macro_sql": "{% macro get_materialized_view_configuration_changes(existing_relation, new_config) %}\n /* {#\n It's recommended that configuration changes be formatted as follows:\n {\"\": [{\"action\": \"\", \"context\": ...}]}\n\n For example:\n {\n \"indexes\": [\n {\"action\": \"drop\", \"context\": \"index_abc\"},\n {\"action\": \"create\", \"context\": {\"columns\": [\"column_1\", \"column_2\"], \"type\": \"hash\", \"unique\": True}},\n ],\n }\n\n Either way, `get_materialized_view_configuration_changes` needs to align with `get_alter_materialized_view_as_sql`.\n #} */\n {{- log('Determining configuration changes on: ' ~ existing_relation) -}}\n {%- do return(adapter.dispatch('get_materialized_view_configuration_changes', 'dbt')(existing_relation, new_config)) -%}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.default__get_materialized_view_configuration_changes" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.259373, + "supported_languages": null + }, + "macro.dbt.default__get_materialized_view_configuration_changes": { + "name": "default__get_materialized_view_configuration_changes", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/get_materialized_view_configuration_changes.sql", + "original_file_path": "macros/materializations/models/materialized_view/get_materialized_view_configuration_changes.sql", + "unique_id": "macro.dbt.default__get_materialized_view_configuration_changes", + "macro_sql": "{% macro default__get_materialized_view_configuration_changes(existing_relation, new_config) %}\n {{ exceptions.raise_compiler_error(\"Materialized views have not been implemented for this adapter.\") }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.259535, + "supported_languages": null + }, + "macro.dbt.get_alter_materialized_view_as_sql": { + "name": "get_alter_materialized_view_as_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/alter_materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/alter_materialized_view.sql", + "unique_id": "macro.dbt.get_alter_materialized_view_as_sql", + "macro_sql": "{% macro get_alter_materialized_view_as_sql(\n relation,\n configuration_changes,\n sql,\n existing_relation,\n backup_relation,\n intermediate_relation\n) %}\n {{- log('Applying ALTER to: ' ~ relation) -}}\n {{- adapter.dispatch('get_alter_materialized_view_as_sql', 'dbt')(\n relation,\n configuration_changes,\n sql,\n existing_relation,\n backup_relation,\n intermediate_relation\n ) -}}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__get_alter_materialized_view_as_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, 
"node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.260193, + "supported_languages": null + }, + "macro.dbt.default__get_alter_materialized_view_as_sql": { + "name": "default__get_alter_materialized_view_as_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/alter_materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/alter_materialized_view.sql", + "unique_id": "macro.dbt.default__get_alter_materialized_view_as_sql", + "macro_sql": "{% macro default__get_alter_materialized_view_as_sql(\n relation,\n configuration_changes,\n sql,\n existing_relation,\n backup_relation,\n intermediate_relation\n) %}\n {{ exceptions.raise_compiler_error(\"Materialized views have not been implemented for this adapter.\") }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2605062, + "supported_languages": null + }, + "macro.dbt.refresh_materialized_view": { + "name": "refresh_materialized_view", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/refresh_materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/refresh_materialized_view.sql", + "unique_id": "macro.dbt.refresh_materialized_view", + "macro_sql": "{% macro refresh_materialized_view(relation) %}\n {{- log('Applying REFRESH to: ' ~ relation) -}}\n {{- adapter.dispatch('refresh_materialized_view', 'dbt')(relation) -}}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__refresh_materialized_view"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.260948, + "supported_languages": null + }, + "macro.dbt.default__refresh_materialized_view": { + "name": "default__refresh_materialized_view", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/refresh_materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/refresh_materialized_view.sql", + "unique_id": "macro.dbt.default__refresh_materialized_view", + "macro_sql": "{% macro default__refresh_materialized_view(relation) %}\n {{ exceptions.raise_compiler_error(\"Materialized views have not been implemented for this adapter.\") }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.261142, + "supported_languages": null + }, + "macro.dbt.get_replace_materialized_view_as_sql": { + "name": "get_replace_materialized_view_as_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/replace_materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/replace_materialized_view.sql", + "unique_id": "macro.dbt.get_replace_materialized_view_as_sql", + "macro_sql": "{% macro get_replace_materialized_view_as_sql(relation, sql, existing_relation, backup_relation, intermediate_relation) %}\n {{- log('Applying REPLACE to: ' ~ relation) -}}\n {{- adapter.dispatch('get_replace_materialized_view_as_sql', 'dbt')(relation, sql, existing_relation, backup_relation, 
intermediate_relation) -}}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__get_replace_materialized_view_as_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.261792, + "supported_languages": null + }, + "macro.dbt.default__get_replace_materialized_view_as_sql": { + "name": "default__get_replace_materialized_view_as_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/replace_materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/replace_materialized_view.sql", + "unique_id": "macro.dbt.default__get_replace_materialized_view_as_sql", + "macro_sql": "{% macro default__get_replace_materialized_view_as_sql(relation, sql, existing_relation, backup_relation, intermediate_relation) %}\n {{ exceptions.raise_compiler_error(\"Materialized views have not been implemented for this adapter.\") }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.26207, + "supported_languages": null + }, + "macro.dbt.get_create_materialized_view_as_sql": { + "name": "get_create_materialized_view_as_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/create_materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/create_materialized_view.sql", + "unique_id": "macro.dbt.get_create_materialized_view_as_sql", + "macro_sql": "{% macro get_create_materialized_view_as_sql(relation, sql) -%}\n {{- log('Applying CREATE to: ' ~ relation) -}}\n {{- adapter.dispatch('get_create_materialized_view_as_sql', 'dbt')(relation, sql) -}}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__get_create_materialized_view_as_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.26244, + "supported_languages": null + }, + "macro.dbt.default__get_create_materialized_view_as_sql": { + "name": "default__get_create_materialized_view_as_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/materialized_view/create_materialized_view.sql", + "original_file_path": "macros/materializations/models/materialized_view/create_materialized_view.sql", + "unique_id": "macro.dbt.default__get_create_materialized_view_as_sql", + "macro_sql": "{% macro default__get_create_materialized_view_as_sql(relation, sql) -%}\n {{ exceptions.raise_compiler_error(\"Materialized views have not been implemented for this adapter.\") }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.26259, + "supported_languages": null + }, + "macro.dbt.can_clone_table": { + "name": "can_clone_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/clone/can_clone_table.sql", + "original_file_path": "macros/materializations/models/clone/can_clone_table.sql", + "unique_id": "macro.dbt.can_clone_table", + "macro_sql": "{% macro can_clone_table() %}\n {{ return(adapter.dispatch('can_clone_table', 'dbt')()) }}\n{% endmacro %}", + 
"depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__can_clone_table"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.26283, + "supported_languages": null + }, + "macro.dbt.default__can_clone_table": { + "name": "default__can_clone_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/clone/can_clone_table.sql", + "original_file_path": "macros/materializations/models/clone/can_clone_table.sql", + "unique_id": "macro.dbt.default__can_clone_table", + "macro_sql": "{% macro default__can_clone_table() %}\n {{ return(False) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.26295, + "supported_languages": null + }, + "macro.dbt.create_or_replace_clone": { + "name": "create_or_replace_clone", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/clone/create_or_replace_clone.sql", + "original_file_path": "macros/materializations/models/clone/create_or_replace_clone.sql", + "unique_id": "macro.dbt.create_or_replace_clone", + "macro_sql": "{% macro create_or_replace_clone(this_relation, defer_relation) %}\n {{ return(adapter.dispatch('create_or_replace_clone', 'dbt')(this_relation, defer_relation)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__create_or_replace_clone"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2633579, + "supported_languages": null + }, + "macro.dbt.default__create_or_replace_clone": { + "name": "default__create_or_replace_clone", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/clone/create_or_replace_clone.sql", + "original_file_path": "macros/materializations/models/clone/create_or_replace_clone.sql", + "unique_id": "macro.dbt.default__create_or_replace_clone", + "macro_sql": "{% macro default__create_or_replace_clone(this_relation, defer_relation) %}\n create or replace table {{ this_relation }} clone {{ defer_relation }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2635012, + "supported_languages": null + }, + "macro.dbt.materialization_clone_default": { + "name": "materialization_clone_default", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/clone/clone.sql", + "original_file_path": "macros/materializations/models/clone/clone.sql", + "unique_id": "macro.dbt.materialization_clone_default", + "macro_sql": "{%- materialization clone, default -%}\n\n {%- set relations = {'relations': []} -%}\n\n {%- if not defer_relation -%}\n -- nothing to do\n {{ log(\"No relation found in state manifest for \" ~ model.unique_id, info=True) }}\n {{ return(relations) }}\n {%- endif -%}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n\n {%- if existing_relation and not flags.FULL_REFRESH -%}\n -- noop!\n {{ log(\"Relation \" ~ existing_relation ~ \" already exists\", info=True) }}\n {{ return(relations) }}\n {%- endif -%}\n\n {%- set other_existing_relation = load_cached_relation(defer_relation) -%}\n\n -- If this is a database 
that can do zero-copy cloning of tables, and the other relation is a table, then this will be a table\n -- Otherwise, this will be a view\n\n {% set can_clone_table = can_clone_table() %}\n\n {%- if other_existing_relation and other_existing_relation.type == 'table' and can_clone_table -%}\n\n {%- set target_relation = this.incorporate(type='table') -%}\n {% if existing_relation is not none and not existing_relation.is_table %}\n {{ log(\"Dropping relation \" ~ existing_relation ~ \" because it is of type \" ~ existing_relation.type) }}\n {{ drop_relation_if_exists(existing_relation) }}\n {% endif %}\n\n -- as a general rule, data platforms that can clone tables can also do atomic 'create or replace'\n {% call statement('main') %}\n {{ create_or_replace_clone(target_relation, defer_relation) }}\n {% endcall %}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n {% do persist_docs(target_relation, model) %}\n\n {{ return({'relations': [target_relation]}) }}\n\n {%- else -%}\n\n {%- set target_relation = this.incorporate(type='view') -%}\n\n -- reuse the view materialization\n -- TODO: support actual dispatch for materialization macros\n -- Tracking ticket: https://github.com/dbt-labs/dbt-core/issues/7799\n {% set search_name = \"materialization_view_\" ~ adapter.type() %}\n {% if not search_name in context %}\n {% set search_name = \"materialization_view_default\" %}\n {% endif %}\n {% set materialization_macro = context[search_name] %}\n {% set relations = materialization_macro() %}\n {{ return(relations) }}\n\n {%- endif -%}\n\n{%- endmaterialization -%}", + "depends_on": { + "macros": [ + "macro.dbt.load_cached_relation", + "macro.dbt.can_clone_table", + "macro.dbt.drop_relation_if_exists", + "macro.dbt.statement", + "macro.dbt.create_or_replace_clone", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants", + "macro.dbt.persist_docs" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.266956, + "supported_languages": ["sql"] + }, + "macro.dbt.get_table_columns_and_constraints": { + "name": "get_table_columns_and_constraints", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/columns_spec_ddl.sql", + "original_file_path": "macros/materializations/models/table/columns_spec_ddl.sql", + "unique_id": "macro.dbt.get_table_columns_and_constraints", + "macro_sql": "{%- macro get_table_columns_and_constraints() -%}\n {{ adapter.dispatch('get_table_columns_and_constraints', 'dbt')() }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": ["macro.dbt.default__get_table_columns_and_constraints"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2680478, + "supported_languages": null + }, + "macro.dbt.default__get_table_columns_and_constraints": { + "name": "default__get_table_columns_and_constraints", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/columns_spec_ddl.sql", + "original_file_path": "macros/materializations/models/table/columns_spec_ddl.sql", + "unique_id": "macro.dbt.default__get_table_columns_and_constraints", + "macro_sql": "{% macro default__get_table_columns_and_constraints() -%}\n {{ return(table_columns_and_constraints()) }}\n{%- endmacro %}", + 
"depends_on": { "macros": ["macro.dbt.table_columns_and_constraints"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2681751, + "supported_languages": null + }, + "macro.dbt.table_columns_and_constraints": { + "name": "table_columns_and_constraints", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/columns_spec_ddl.sql", + "original_file_path": "macros/materializations/models/table/columns_spec_ddl.sql", + "unique_id": "macro.dbt.table_columns_and_constraints", + "macro_sql": "{% macro table_columns_and_constraints() %}\n {# loop through user_provided_columns to create DDL with data types and constraints #}\n {%- set raw_column_constraints = adapter.render_raw_columns_constraints(raw_columns=model['columns']) -%}\n {%- set raw_model_constraints = adapter.render_raw_model_constraints(raw_constraints=model['constraints']) -%}\n (\n {% for c in raw_column_constraints -%}\n {{ c }}{{ \",\" if not loop.last or raw_model_constraints }}\n {% endfor %}\n {% for c in raw_model_constraints -%}\n {{ c }}{{ \",\" if not loop.last }}\n {% endfor -%}\n )\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.268726, + "supported_languages": null + }, + "macro.dbt.get_assert_columns_equivalent": { + "name": "get_assert_columns_equivalent", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/columns_spec_ddl.sql", + "original_file_path": "macros/materializations/models/table/columns_spec_ddl.sql", + "unique_id": "macro.dbt.get_assert_columns_equivalent", + "macro_sql": "\n\n{%- macro get_assert_columns_equivalent(sql) -%}\n {{ adapter.dispatch('get_assert_columns_equivalent', 'dbt')(sql) }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": ["macro.dbt.default__get_assert_columns_equivalent"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2689018, + "supported_languages": null + }, + "macro.dbt.default__get_assert_columns_equivalent": { + "name": "default__get_assert_columns_equivalent", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/columns_spec_ddl.sql", + "original_file_path": "macros/materializations/models/table/columns_spec_ddl.sql", + "unique_id": "macro.dbt.default__get_assert_columns_equivalent", + "macro_sql": "{% macro default__get_assert_columns_equivalent(sql) -%}\n {{ return(assert_columns_equivalent(sql)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.assert_columns_equivalent"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.269043, + "supported_languages": null + }, + "macro.dbt.assert_columns_equivalent": { + "name": "assert_columns_equivalent", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/columns_spec_ddl.sql", + "original_file_path": "macros/materializations/models/table/columns_spec_ddl.sql", + "unique_id": "macro.dbt.assert_columns_equivalent", + "macro_sql": "{% macro assert_columns_equivalent(sql) %}\n\n {#-- First ensure the user has defined 'columns' in yaml specification --#}\n {%- set 
user_defined_columns = model['columns'] -%}\n {%- if not user_defined_columns -%}\n {{ exceptions.raise_contract_error([], []) }}\n {%- endif -%}\n\n {#-- Obtain the column schema provided by sql file. #}\n {%- set sql_file_provided_columns = get_column_schema_from_query(sql, config.get('sql_header', none)) -%}\n {#--Obtain the column schema provided by the schema file by generating an 'empty schema' query from the model's columns. #}\n {%- set schema_file_provided_columns = get_column_schema_from_query(get_empty_schema_sql(user_defined_columns)) -%}\n\n {#-- create dictionaries with name and formatted data type and strings for exception #}\n {%- set sql_columns = format_columns(sql_file_provided_columns) -%}\n {%- set yaml_columns = format_columns(schema_file_provided_columns) -%}\n\n {%- if sql_columns|length != yaml_columns|length -%}\n {%- do exceptions.raise_contract_error(yaml_columns, sql_columns) -%}\n {%- endif -%}\n\n {%- for sql_col in sql_columns -%}\n {%- set yaml_col = [] -%}\n {%- for this_col in yaml_columns -%}\n {%- if this_col['name'] == sql_col['name'] -%}\n {%- do yaml_col.append(this_col) -%}\n {%- break -%}\n {%- endif -%}\n {%- endfor -%}\n {%- if not yaml_col -%}\n {#-- Column with name not found in yaml #}\n {%- do exceptions.raise_contract_error(yaml_columns, sql_columns) -%}\n {%- endif -%}\n {%- if sql_col['formatted'] != yaml_col[0]['formatted'] -%}\n {#-- Column data types don't match #}\n {%- do exceptions.raise_contract_error(yaml_columns, sql_columns) -%}\n {%- endif -%}\n {%- endfor -%}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.get_column_schema_from_query", + "macro.dbt.get_empty_schema_sql", + "macro.dbt.format_columns" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2703779, + "supported_languages": null + }, + "macro.dbt.format_columns": { + "name": "format_columns", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/columns_spec_ddl.sql", + "original_file_path": "macros/materializations/models/table/columns_spec_ddl.sql", + "unique_id": "macro.dbt.format_columns", + "macro_sql": "{% macro format_columns(columns) %}\n {% set formatted_columns = [] %}\n {% for column in columns %}\n {%- set formatted_column = adapter.dispatch('format_column', 'dbt')(column) -%}\n {%- do formatted_columns.append(formatted_column) -%}\n {% endfor %}\n {{ return(formatted_columns) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__format_column"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2707741, + "supported_languages": null + }, + "macro.dbt.default__format_column": { + "name": "default__format_column", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/columns_spec_ddl.sql", + "original_file_path": "macros/materializations/models/table/columns_spec_ddl.sql", + "unique_id": "macro.dbt.default__format_column", + "macro_sql": "{% macro default__format_column(column) -%}\n {% set data_type = column.dtype %}\n {% set formatted = column.column.lower() ~ \" \" ~ data_type %}\n {{ return({'name': column.name, 'data_type': data_type, 'formatted': formatted}) }}\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + 
"arguments": [], + "created_at": 1705588676.271314, + "supported_languages": null + }, + "macro.dbt.materialization_table_default": { + "name": "materialization_table_default", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/table.sql", + "original_file_path": "macros/materializations/models/table/table.sql", + "unique_id": "macro.dbt.materialization_table_default", + "macro_sql": "{% materialization table, default %}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='table') %}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n -- the intermediate_relation should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n /*\n See ../view/view.sql for more information about this relation.\n */\n {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n -- as above, the backup_relation should not already exist\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n\n -- drop the temp relations if they exist already in the database\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_table_as_sql(False, intermediate_relation, sql) }}\n {%- endcall %}\n\n -- cleanup\n {% if existing_relation is not none %}\n /* Do the equivalent of rename_if_exists. 'existing_relation' could have been dropped\n since the variable was first set. 
*/\n {% set existing_relation = load_cached_relation(existing_relation) %}\n {% if existing_relation is not none %}\n {{ adapter.rename_relation(existing_relation, backup_relation) }}\n {% endif %}\n {% endif %}\n\n {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n {% do create_indexes(target_relation) %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n -- `COMMIT` happens here\n {{ adapter.commit() }}\n\n -- finally, drop the existing/backup relation after the commit\n {{ drop_relation_if_exists(backup_relation) }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n{% endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt.load_cached_relation", + "macro.dbt.make_intermediate_relation", + "macro.dbt.make_backup_relation", + "macro.dbt.drop_relation_if_exists", + "macro.dbt.run_hooks", + "macro.dbt.statement", + "macro.dbt.get_create_table_as_sql", + "macro.dbt.create_indexes", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants", + "macro.dbt.persist_docs" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.274841, + "supported_languages": ["sql"] + }, + "macro.dbt.get_create_table_as_sql": { + "name": "get_create_table_as_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/create_table_as.sql", + "original_file_path": "macros/materializations/models/table/create_table_as.sql", + "unique_id": "macro.dbt.get_create_table_as_sql", + "macro_sql": "{% macro get_create_table_as_sql(temporary, relation, sql) -%}\n {{ adapter.dispatch('get_create_table_as_sql', 'dbt')(temporary, relation, sql) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__get_create_table_as_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.275815, + "supported_languages": null + }, + "macro.dbt.default__get_create_table_as_sql": { + "name": "default__get_create_table_as_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/create_table_as.sql", + "original_file_path": "macros/materializations/models/table/create_table_as.sql", + "unique_id": "macro.dbt.default__get_create_table_as_sql", + "macro_sql": "{% macro default__get_create_table_as_sql(temporary, relation, sql) -%}\n {{ return(create_table_as(temporary, relation, sql)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.create_table_as"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.276017, + "supported_languages": null + }, + "macro.dbt.create_table_as": { + "name": "create_table_as", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/create_table_as.sql", + "original_file_path": "macros/materializations/models/table/create_table_as.sql", + "unique_id": "macro.dbt.create_table_as", + "macro_sql": "{% macro create_table_as(temporary, relation, compiled_code, language='sql') -%}\n {# backward compatibility for create_table_as that 
does not support language #}\n {% if language == \"sql\" %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code)}}\n {% else %}\n {{ adapter.dispatch('create_table_as', 'dbt')(temporary, relation, compiled_code, language) }}\n {% endif %}\n\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__create_table_as"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.276479, + "supported_languages": null + }, + "macro.dbt.default__create_table_as": { + "name": "default__create_table_as", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/create_table_as.sql", + "original_file_path": "macros/materializations/models/table/create_table_as.sql", + "unique_id": "macro.dbt.default__create_table_as", + "macro_sql": "{% macro default__create_table_as(temporary, relation, sql) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n\n create {% if temporary: -%}temporary{%- endif %} table\n {{ relation.include(database=(not temporary), schema=(not temporary)) }}\n {% set contract_config = config.get('contract') %}\n {% if contract_config.enforced and (not temporary) %}\n {{ get_assert_columns_equivalent(sql) }}\n {{ get_table_columns_and_constraints() }}\n {%- set sql = get_select_subquery(sql) %}\n {% endif %}\n as (\n {{ sql }}\n );\n{%- endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.get_assert_columns_equivalent", + "macro.dbt.get_table_columns_and_constraints", + "macro.dbt.get_select_subquery" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.277211, + "supported_languages": null + }, + "macro.dbt.default__get_column_names": { + "name": "default__get_column_names", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/create_table_as.sql", + "original_file_path": "macros/materializations/models/table/create_table_as.sql", + "unique_id": "macro.dbt.default__get_column_names", + "macro_sql": "{% macro default__get_column_names() %}\n {#- loop through user_provided_columns to get column names -#}\n {%- set user_provided_columns = model['columns'] -%}\n {%- for i in user_provided_columns %}\n {%- set col = user_provided_columns[i] -%}\n {%- set col_name = adapter.quote(col['name']) if col.get('quote') else col['name'] -%}\n {{ col_name }}{{ \", \" if not loop.last }}\n {%- endfor -%}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.277688, + "supported_languages": null + }, + "macro.dbt.get_select_subquery": { + "name": "get_select_subquery", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/create_table_as.sql", + "original_file_path": "macros/materializations/models/table/create_table_as.sql", + "unique_id": "macro.dbt.get_select_subquery", + "macro_sql": "{% macro get_select_subquery(sql) %}\n {{ return(adapter.dispatch('get_select_subquery', 'dbt')(sql)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_select_subquery"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], 
+ "created_at": 1705588676.277882, + "supported_languages": null + }, + "macro.dbt.default__get_select_subquery": { + "name": "default__get_select_subquery", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/table/create_table_as.sql", + "original_file_path": "macros/materializations/models/table/create_table_as.sql", + "unique_id": "macro.dbt.default__get_select_subquery", + "macro_sql": "{% macro default__get_select_subquery(sql) %}\n select {{ adapter.dispatch('get_column_names', 'dbt')() }}\n from (\n {{ sql }}\n ) as model_subq\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_column_names"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.278068, + "supported_languages": null + }, + "macro.dbt.materialization_view_default": { + "name": "materialization_view_default", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/view/view.sql", + "original_file_path": "macros/materializations/models/view/view.sql", + "unique_id": "macro.dbt.materialization_view_default", + "macro_sql": "{%- materialization view, default -%}\n\n {%- set existing_relation = load_cached_relation(this) -%}\n {%- set target_relation = this.incorporate(type='view') -%}\n {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}\n\n -- the intermediate_relation should not already exist in the database; get_relation\n -- will return None in that case. Otherwise, we get a relation that we can drop\n -- later, before we try to use this name for the current operation\n {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}\n /*\n This relation (probably) doesn't exist yet. If it does exist, it's a leftover from\n a previous run, and we're going to try to drop it immediately. At the end of this\n materialization, we're going to rename the \"existing_relation\" to this identifier,\n and then we're going to drop it. In order to make sure we run the correct one of:\n - drop view ...\n - drop table ...\n\n We need to set the type of this relation to be the type of the existing_relation, if it exists,\n or else \"view\" as a sane default if it does not. Note that if the existing_relation does not\n exist, then there is nothing to move out of the way and subsequentally drop. 
In that case,\n this relation will be effectively unused.\n */\n {%- set backup_relation_type = 'view' if existing_relation is none else existing_relation.type -%}\n {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}\n -- as above, the backup_relation should not already exist\n {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}\n -- grab current tables grants config for comparision later on\n {% set grant_config = config.get('grants') %}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- drop the temp relations if they exist already in the database\n {{ drop_relation_if_exists(preexisting_intermediate_relation) }}\n {{ drop_relation_if_exists(preexisting_backup_relation) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_view_as_sql(intermediate_relation, sql) }}\n {%- endcall %}\n\n -- cleanup\n -- move the existing view out of the way\n {% if existing_relation is not none %}\n /* Do the equivalent of rename_if_exists. 'existing_relation' could have been dropped\n since the variable was first set. */\n {% set existing_relation = load_cached_relation(existing_relation) %}\n {% if existing_relation is not none %}\n {{ adapter.rename_relation(existing_relation, backup_relation) }}\n {% endif %}\n {% endif %}\n {{ adapter.rename_relation(intermediate_relation, target_relation) }}\n\n {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n {{ adapter.commit() }}\n\n {{ drop_relation_if_exists(backup_relation) }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{%- endmaterialization -%}", + "depends_on": { + "macros": [ + "macro.dbt.load_cached_relation", + "macro.dbt.make_intermediate_relation", + "macro.dbt.make_backup_relation", + "macro.dbt.run_hooks", + "macro.dbt.drop_relation_if_exists", + "macro.dbt.statement", + "macro.dbt.get_create_view_as_sql", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants", + "macro.dbt.persist_docs" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2814999, + "supported_languages": ["sql"] + }, + "macro.dbt.handle_existing_table": { + "name": "handle_existing_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/view/helpers.sql", + "original_file_path": "macros/materializations/models/view/helpers.sql", + "unique_id": "macro.dbt.handle_existing_table", + "macro_sql": "{% macro handle_existing_table(full_refresh, old_relation) %}\n {{ adapter.dispatch('handle_existing_table', 'dbt')(full_refresh, old_relation) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__handle_existing_table"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2818828, + "supported_languages": null + }, + "macro.dbt.default__handle_existing_table": { + "name": "default__handle_existing_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/view/helpers.sql", + "original_file_path": 
"macros/materializations/models/view/helpers.sql", + "unique_id": "macro.dbt.default__handle_existing_table", + "macro_sql": "{% macro default__handle_existing_table(full_refresh, old_relation) %}\n {{ log(\"Dropping relation \" ~ old_relation ~ \" because it is of type \" ~ old_relation.type) }}\n {{ adapter.drop_relation(old_relation) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.282273, + "supported_languages": null + }, + "macro.dbt.create_or_replace_view": { + "name": "create_or_replace_view", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/view/create_or_replace_view.sql", + "original_file_path": "macros/materializations/models/view/create_or_replace_view.sql", + "unique_id": "macro.dbt.create_or_replace_view", + "macro_sql": "{% macro create_or_replace_view() %}\n {%- set identifier = model['alias'] -%}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n {%- set target_relation = api.Relation.create(\n identifier=identifier, schema=schema, database=database,\n type='view') -%}\n {% set grant_config = config.get('grants') %}\n\n {{ run_hooks(pre_hooks) }}\n\n -- If there's a table with the same name and we weren't told to full refresh,\n -- that's an error. If we were told to full refresh, drop it. This behavior differs\n -- for Snowflake and BigQuery, so multiple dispatch is used.\n {%- if old_relation is not none and old_relation.is_table -%}\n {{ handle_existing_table(should_full_refresh(), old_relation) }}\n {%- endif -%}\n\n -- build model\n {% call statement('main') -%}\n {{ get_create_view_as_sql(target_relation, sql) }}\n {%- endcall %}\n\n {% set should_revoke = should_revoke(exists_as_view, full_refresh_mode=True) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {{ run_hooks(post_hooks) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.run_hooks", + "macro.dbt.handle_existing_table", + "macro.dbt.should_full_refresh", + "macro.dbt.statement", + "macro.dbt.get_create_view_as_sql", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2840471, + "supported_languages": null + }, + "macro.dbt.get_create_view_as_sql": { + "name": "get_create_view_as_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/view/create_view_as.sql", + "original_file_path": "macros/materializations/models/view/create_view_as.sql", + "unique_id": "macro.dbt.get_create_view_as_sql", + "macro_sql": "{% macro get_create_view_as_sql(relation, sql) -%}\n {{ adapter.dispatch('get_create_view_as_sql', 'dbt')(relation, sql) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_create_view_as_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.284516, + "supported_languages": null + }, + "macro.dbt.default__get_create_view_as_sql": { + "name": "default__get_create_view_as_sql", + "resource_type": "macro", + "package_name": 
"dbt", + "path": "macros/materializations/models/view/create_view_as.sql", + "original_file_path": "macros/materializations/models/view/create_view_as.sql", + "unique_id": "macro.dbt.default__get_create_view_as_sql", + "macro_sql": "{% macro default__get_create_view_as_sql(relation, sql) -%}\n {{ return(create_view_as(relation, sql)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.create_view_as"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.284692, + "supported_languages": null + }, + "macro.dbt.create_view_as": { + "name": "create_view_as", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/view/create_view_as.sql", + "original_file_path": "macros/materializations/models/view/create_view_as.sql", + "unique_id": "macro.dbt.create_view_as", + "macro_sql": "{% macro create_view_as(relation, sql) -%}\n {{ adapter.dispatch('create_view_as', 'dbt')(relation, sql) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__create_view_as"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.284885, + "supported_languages": null + }, + "macro.dbt.default__create_view_as": { + "name": "default__create_view_as", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/models/view/create_view_as.sql", + "original_file_path": "macros/materializations/models/view/create_view_as.sql", + "unique_id": "macro.dbt.default__create_view_as", + "macro_sql": "{% macro default__create_view_as(relation, sql) -%}\n {%- set sql_header = config.get('sql_header', none) -%}\n\n {{ sql_header if sql_header is not none }}\n create view {{ relation }}\n {% set contract_config = config.get('contract') %}\n {% if contract_config.enforced %}\n {{ get_assert_columns_equivalent(sql) }}\n {%- endif %}\n as (\n {{ sql }}\n );\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_assert_columns_equivalent"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.285345, + "supported_languages": null + }, + "macro.dbt.materialization_seed_default": { + "name": "materialization_seed_default", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/seed.sql", + "original_file_path": "macros/materializations/seeds/seed.sql", + "unique_id": "macro.dbt.materialization_seed_default", + "macro_sql": "{% materialization seed, default %}\n\n {%- set identifier = model['alias'] -%}\n {%- set full_refresh_mode = (should_full_refresh()) -%}\n\n {%- set old_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) -%}\n\n {%- set exists_as_table = (old_relation is not none and old_relation.is_table) -%}\n {%- set exists_as_view = (old_relation is not none and old_relation.is_view) -%}\n\n {%- set grant_config = config.get('grants') -%}\n {%- set agate_table = load_agate_table() -%}\n -- grab current tables grants config for comparison later on\n\n {%- do store_result('agate_table', response='OK', agate_table=agate_table) -%}\n\n {{ run_hooks(pre_hooks, inside_transaction=False) }}\n\n -- `BEGIN` happens here:\n {{ run_hooks(pre_hooks, inside_transaction=True) }}\n\n -- build model\n {% set create_table_sql = \"\" %}\n {% if exists_as_view %}\n {{ 
exceptions.raise_compiler_error(\"Cannot seed to '{}', it is a view\".format(old_relation)) }}\n {% elif exists_as_table %}\n {% set create_table_sql = reset_csv_table(model, full_refresh_mode, old_relation, agate_table) %}\n {% else %}\n {% set create_table_sql = create_csv_table(model, agate_table) %}\n {% endif %}\n\n {% set code = 'CREATE' if full_refresh_mode else 'INSERT' %}\n {% set rows_affected = (agate_table.rows | length) %}\n {% set sql = load_csv_rows(model, agate_table) %}\n\n {% call noop_statement('main', code ~ ' ' ~ rows_affected, code, rows_affected) %}\n {{ get_csv_sql(create_table_sql, sql) }};\n {% endcall %}\n\n {% set target_relation = this.incorporate(type='table') %}\n\n {% set should_revoke = should_revoke(old_relation, full_refresh_mode) %}\n {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}\n\n {% do persist_docs(target_relation, model) %}\n\n {% if full_refresh_mode or not exists_as_table %}\n {% do create_indexes(target_relation) %}\n {% endif %}\n\n {{ run_hooks(post_hooks, inside_transaction=True) }}\n\n -- `COMMIT` happens here\n {{ adapter.commit() }}\n\n {{ run_hooks(post_hooks, inside_transaction=False) }}\n\n {{ return({'relations': [target_relation]}) }}\n\n{% endmaterialization %}", + "depends_on": { + "macros": [ + "macro.dbt.should_full_refresh", + "macro.dbt.run_hooks", + "macro.dbt.reset_csv_table", + "macro.dbt.create_csv_table", + "macro.dbt.load_csv_rows", + "macro.dbt.noop_statement", + "macro.dbt.get_csv_sql", + "macro.dbt.should_revoke", + "macro.dbt.apply_grants", + "macro.dbt.persist_docs", + "macro.dbt.create_indexes" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.289053, + "supported_languages": ["sql"] + }, + "macro.dbt.create_csv_table": { + "name": "create_csv_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.create_csv_table", + "macro_sql": "{% macro create_csv_table(model, agate_table) -%}\n {{ adapter.dispatch('create_csv_table', 'dbt')(model, agate_table) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__create_csv_table"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.294555, + "supported_languages": null + }, + "macro.dbt.default__create_csv_table": { + "name": "default__create_csv_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.default__create_csv_table", + "macro_sql": "{% macro default__create_csv_table(model, agate_table) %}\n {%- set column_override = model['config'].get('column_types', {}) -%}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n\n {% set sql %}\n create table {{ this.render() }} (\n {%- for col_name in agate_table.column_names -%}\n {%- set inferred_type = adapter.convert_type(agate_table, loop.index0) -%}\n {%- set type = column_override.get(col_name, inferred_type) -%}\n {%- set column_name = (col_name | string) -%}\n {{ adapter.quote_seed_column(column_name, quote_seed_column) }} {{ type }} {%- if not loop.last -%}, {%- endif -%}\n {%- endfor -%}\n )\n {% endset %}\n\n {% call 
statement('_') -%}\n {{ sql }}\n {%- endcall %}\n\n {{ return(sql) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.295679, + "supported_languages": null + }, + "macro.dbt.reset_csv_table": { + "name": "reset_csv_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.reset_csv_table", + "macro_sql": "{% macro reset_csv_table(model, full_refresh, old_relation, agate_table) -%}\n {{ adapter.dispatch('reset_csv_table', 'dbt')(model, full_refresh, old_relation, agate_table) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__reset_csv_table"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2959921, + "supported_languages": null + }, + "macro.dbt.default__reset_csv_table": { + "name": "default__reset_csv_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.default__reset_csv_table", + "macro_sql": "{% macro default__reset_csv_table(model, full_refresh, old_relation, agate_table) %}\n {% set sql = \"\" %}\n {% if full_refresh %}\n {{ adapter.drop_relation(old_relation) }}\n {% set sql = create_csv_table(model, agate_table) %}\n {% else %}\n {{ adapter.truncate_relation(old_relation) }}\n {% set sql = \"truncate table \" ~ old_relation %}\n {% endif %}\n\n {{ return(sql) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.create_csv_table"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.29652, + "supported_languages": null + }, + "macro.dbt.get_csv_sql": { + "name": "get_csv_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.get_csv_sql", + "macro_sql": "{% macro get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ adapter.dispatch('get_csv_sql', 'dbt')(create_or_truncate_sql, insert_sql) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_csv_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.296732, + "supported_languages": null + }, + "macro.dbt.default__get_csv_sql": { + "name": "default__get_csv_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.default__get_csv_sql", + "macro_sql": "{% macro default__get_csv_sql(create_or_truncate_sql, insert_sql) %}\n {{ create_or_truncate_sql }};\n -- dbt seed --\n {{ insert_sql }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2968771, + "supported_languages": null + }, + "macro.dbt.get_binding_char": { + "name": "get_binding_char", + 
"resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.get_binding_char", + "macro_sql": "{% macro get_binding_char() -%}\n {{ adapter.dispatch('get_binding_char', 'dbt')() }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_binding_char"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2970219, + "supported_languages": null + }, + "macro.dbt.default__get_binding_char": { + "name": "default__get_binding_char", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.default__get_binding_char", + "macro_sql": "{% macro default__get_binding_char() %}\n {{ return('%s') }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2971442, + "supported_languages": null + }, + "macro.dbt.get_batch_size": { + "name": "get_batch_size", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.get_batch_size", + "macro_sql": "{% macro get_batch_size() -%}\n {{ return(adapter.dispatch('get_batch_size', 'dbt')()) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_batch_size"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.297311, + "supported_languages": null + }, + "macro.dbt.default__get_batch_size": { + "name": "default__get_batch_size", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.default__get_batch_size", + "macro_sql": "{% macro default__get_batch_size() %}\n {{ return(10000) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2974389, + "supported_languages": null + }, + "macro.dbt.get_seed_column_quoted_csv": { + "name": "get_seed_column_quoted_csv", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.get_seed_column_quoted_csv", + "macro_sql": "{% macro get_seed_column_quoted_csv(model, column_names) %}\n {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}\n {% set quoted = [] %}\n {% for col in column_names -%}\n {%- do quoted.append(adapter.quote_seed_column(col, quote_seed_column)) -%}\n {%- endfor %}\n\n {%- set dest_cols_csv = quoted | join(', ') -%}\n {{ return(dest_cols_csv) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.297951, + "supported_languages": null + }, + "macro.dbt.load_csv_rows": { + "name": "load_csv_rows", + 
"resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.load_csv_rows", + "macro_sql": "{% macro load_csv_rows(model, agate_table) -%}\n {{ adapter.dispatch('load_csv_rows', 'dbt')(model, agate_table) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__load_csv_rows"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.298149, + "supported_languages": null + }, + "macro.dbt.default__load_csv_rows": { + "name": "default__load_csv_rows", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/materializations/seeds/helpers.sql", + "original_file_path": "macros/materializations/seeds/helpers.sql", + "unique_id": "macro.dbt.default__load_csv_rows", + "macro_sql": "{% macro default__load_csv_rows(model, agate_table) %}\n\n {% set batch_size = get_batch_size() %}\n\n {% set cols_sql = get_seed_column_quoted_csv(model, agate_table.column_names) %}\n {% set bindings = [] %}\n\n {% set statements = [] %}\n\n {% for chunk in agate_table.rows | batch(batch_size) %}\n {% set bindings = [] %}\n\n {% for row in chunk %}\n {% do bindings.extend(row) %}\n {% endfor %}\n\n {% set sql %}\n insert into {{ this.render() }} ({{ cols_sql }}) values\n {% for row in chunk -%}\n ({%- for column in agate_table.column_names -%}\n {{ get_binding_char() }}\n {%- if not loop.last%},{%- endif %}\n {%- endfor -%})\n {%- if not loop.last%},{%- endif %}\n {%- endfor %}\n {% endset %}\n\n {% do adapter.add_query(sql, bindings=bindings, abridge_sql_log=True) %}\n\n {% if loop.index0 == 0 %}\n {% do statements.append(sql) %}\n {% endif %}\n {% endfor %}\n\n {# Return SQL so we can render it out into the compiled files #}\n {{ return(statements[0]) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.get_batch_size", + "macro.dbt.get_seed_column_quoted_csv", + "macro.dbt.get_binding_char" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.2999191, + "supported_languages": null + }, + "macro.dbt.generate_alias_name": { + "name": "generate_alias_name", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/get_custom_name/get_custom_alias.sql", + "original_file_path": "macros/get_custom_name/get_custom_alias.sql", + "unique_id": "macro.dbt.generate_alias_name", + "macro_sql": "{% macro generate_alias_name(custom_alias_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_alias_name', 'dbt')(custom_alias_name, node)) %}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__generate_alias_name"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.30044, + "supported_languages": null + }, + "macro.dbt.default__generate_alias_name": { + "name": "default__generate_alias_name", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/get_custom_name/get_custom_alias.sql", + "original_file_path": "macros/get_custom_name/get_custom_alias.sql", + "unique_id": "macro.dbt.default__generate_alias_name", + "macro_sql": "{% macro default__generate_alias_name(custom_alias_name=none, node=none) -%}\n\n {%- if custom_alias_name -%}\n\n {{ custom_alias_name | trim }}\n\n {%- 
elif node.version -%}\n\n {{ return(node.name ~ \"_v\" ~ (node.version | replace(\".\", \"_\"))) }}\n\n {%- else -%}\n\n {{ node.name }}\n\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.300857, + "supported_languages": null + }, + "macro.dbt.generate_schema_name": { + "name": "generate_schema_name", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/get_custom_name/get_custom_schema.sql", + "original_file_path": "macros/get_custom_name/get_custom_schema.sql", + "unique_id": "macro.dbt.generate_schema_name", + "macro_sql": "{% macro generate_schema_name(custom_schema_name=none, node=none) -%}\n {{ return(adapter.dispatch('generate_schema_name', 'dbt')(custom_schema_name, node)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__generate_schema_name"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.301497, + "supported_languages": null + }, + "macro.dbt.default__generate_schema_name": { + "name": "default__generate_schema_name", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/get_custom_name/get_custom_schema.sql", + "original_file_path": "macros/get_custom_name/get_custom_schema.sql", + "unique_id": "macro.dbt.default__generate_schema_name", + "macro_sql": "{% macro default__generate_schema_name(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if custom_schema_name is none -%}\n\n {{ default_schema }}\n\n {%- else -%}\n\n {{ default_schema }}_{{ custom_schema_name | trim }}\n\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.301826, + "supported_languages": null + }, + "macro.dbt.generate_schema_name_for_env": { + "name": "generate_schema_name_for_env", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/get_custom_name/get_custom_schema.sql", + "original_file_path": "macros/get_custom_name/get_custom_schema.sql", + "unique_id": "macro.dbt.generate_schema_name_for_env", + "macro_sql": "{% macro generate_schema_name_for_env(custom_schema_name, node) -%}\n\n {%- set default_schema = target.schema -%}\n {%- if target.name == 'prod' and custom_schema_name is not none -%}\n\n {{ custom_schema_name | trim }}\n\n {%- else -%}\n\n {{ default_schema }}\n\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.302146, + "supported_languages": null + }, + "macro.dbt.generate_database_name": { + "name": "generate_database_name", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/get_custom_name/get_custom_database.sql", + "original_file_path": "macros/get_custom_name/get_custom_database.sql", + "unique_id": "macro.dbt.generate_database_name", + "macro_sql": "{% macro generate_database_name(custom_database_name=none, node=none) -%}\n {% do return(adapter.dispatch('generate_database_name', 'dbt')(custom_database_name, node)) %}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__generate_database_name"] }, + "description": "", + "meta": {}, + 
"docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.302591, + "supported_languages": null + }, + "macro.dbt.default__generate_database_name": { + "name": "default__generate_database_name", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/get_custom_name/get_custom_database.sql", + "original_file_path": "macros/get_custom_name/get_custom_database.sql", + "unique_id": "macro.dbt.default__generate_database_name", + "macro_sql": "{% macro default__generate_database_name(custom_database_name=none, node=none) -%}\n {%- set default_database = target.database -%}\n {%- if custom_database_name is none -%}\n\n {{ default_database }}\n\n {%- else -%}\n\n {{ custom_database_name }}\n\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3028648, + "supported_languages": null + }, + "macro.dbt.default__test_relationships": { + "name": "default__test_relationships", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/generic_test_sql/relationships.sql", + "original_file_path": "macros/generic_test_sql/relationships.sql", + "unique_id": "macro.dbt.default__test_relationships", + "macro_sql": "{% macro default__test_relationships(model, column_name, to, field) %}\n\nwith child as (\n select {{ column_name }} as from_field\n from {{ model }}\n where {{ column_name }} is not null\n),\n\nparent as (\n select {{ field }} as to_field\n from {{ to }}\n)\n\nselect\n from_field\n\nfrom child\nleft join parent\n on child.from_field = parent.to_field\n\nwhere parent.to_field is null\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3032992, + "supported_languages": null + }, + "macro.dbt.default__test_not_null": { + "name": "default__test_not_null", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/generic_test_sql/not_null.sql", + "original_file_path": "macros/generic_test_sql/not_null.sql", + "unique_id": "macro.dbt.default__test_not_null", + "macro_sql": "{% macro default__test_not_null(model, column_name) %}\n\n{% set column_list = '*' if should_store_failures() else column_name %}\n\nselect {{ column_list }}\nfrom {{ model }}\nwhere {{ column_name }} is null\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.should_store_failures"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.303657, + "supported_languages": null + }, + "macro.dbt.default__test_unique": { + "name": "default__test_unique", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/generic_test_sql/unique.sql", + "original_file_path": "macros/generic_test_sql/unique.sql", + "unique_id": "macro.dbt.default__test_unique", + "macro_sql": "{% macro default__test_unique(model, column_name) %}\n\nselect\n {{ column_name }} as unique_field,\n count(*) as n_records\n\nfrom {{ model }}\nwhere {{ column_name }} is not null\ngroup by {{ column_name }}\nhaving count(*) > 1\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3039339, + 
"supported_languages": null + }, + "macro.dbt.default__test_accepted_values": { + "name": "default__test_accepted_values", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/generic_test_sql/accepted_values.sql", + "original_file_path": "macros/generic_test_sql/accepted_values.sql", + "unique_id": "macro.dbt.default__test_accepted_values", + "macro_sql": "{% macro default__test_accepted_values(model, column_name, values, quote=True) %}\n\nwith all_values as (\n\n select\n {{ column_name }} as value_field,\n count(*) as n_records\n\n from {{ model }}\n group by {{ column_name }}\n\n)\n\nselect *\nfrom all_values\nwhere value_field not in (\n {% for value in values -%}\n {% if quote -%}\n '{{ value }}'\n {%- else -%}\n {{ value }}\n {%- endif -%}\n {%- if not loop.last -%},{%- endif %}\n {%- endfor %}\n)\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3045819, + "supported_languages": null + }, + "macro.dbt.statement": { + "name": "statement", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/etc/statement.sql", + "original_file_path": "macros/etc/statement.sql", + "unique_id": "macro.dbt.statement", + "macro_sql": "\n{%- macro statement(name=None, fetch_result=False, auto_begin=True, language='sql') -%}\n {%- if execute: -%}\n {%- set compiled_code = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime {} for node \"{}\"'.format(language, model['unique_id'])) }}\n {{ write(compiled_code) }}\n {%- endif -%}\n {%- if language == 'sql'-%}\n {%- set res, table = adapter.execute(compiled_code, auto_begin=auto_begin, fetch=fetch_result) -%}\n {%- elif language == 'python' -%}\n {%- set res = submit_python_job(model, compiled_code) -%}\n {#-- TODO: What should table be for python models? 
--#}\n {%- set table = None -%}\n {%- else -%}\n {% do exceptions.raise_compiler_error(\"statement macro didn't get supported language\") %}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_result(name, response=res, agate_table=table) }}\n {%- endif -%}\n\n {%- endif -%}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.306224, + "supported_languages": null + }, + "macro.dbt.noop_statement": { + "name": "noop_statement", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/etc/statement.sql", + "original_file_path": "macros/etc/statement.sql", + "unique_id": "macro.dbt.noop_statement", + "macro_sql": "{% macro noop_statement(name=None, message=None, code=None, rows_affected=None, res=None) -%}\n {%- set sql = caller() -%}\n\n {%- if name == 'main' -%}\n {{ log('Writing runtime SQL for node \"{}\"'.format(model['unique_id'])) }}\n {{ write(sql) }}\n {%- endif -%}\n\n {%- if name is not none -%}\n {{ store_raw_result(name, message=message, code=code, rows_affected=rows_affected, agate_table=res) }}\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.306844, + "supported_languages": null + }, + "macro.dbt.run_query": { + "name": "run_query", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/etc/statement.sql", + "original_file_path": "macros/etc/statement.sql", + "unique_id": "macro.dbt.run_query", + "macro_sql": "{% macro run_query(sql) %}\n {% call statement(\"run_query_statement\", fetch_result=true, auto_begin=false) %}\n {{ sql }}\n {% endcall %}\n\n {% do return(load_result(\"run_query_statement\").table) %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.307171, + "supported_languages": null + }, + "macro.dbt.convert_datetime": { + "name": "convert_datetime", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/etc/datetime.sql", + "original_file_path": "macros/etc/datetime.sql", + "unique_id": "macro.dbt.convert_datetime", + "macro_sql": "{% macro convert_datetime(date_str, date_fmt) %}\n\n {% set error_msg -%}\n The provided partition date '{{ date_str }}' does not match the expected format '{{ date_fmt }}'\n {%- endset %}\n\n {% set res = try_or_compiler_error(error_msg, modules.datetime.datetime.strptime, date_str.strip(), date_fmt) %}\n {{ return(res) }}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.309071, + "supported_languages": null + }, + "macro.dbt.dates_in_range": { + "name": "dates_in_range", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/etc/datetime.sql", + "original_file_path": "macros/etc/datetime.sql", + "unique_id": "macro.dbt.dates_in_range", + "macro_sql": "{% macro dates_in_range(start_date_str, end_date_str=none, in_fmt=\"%Y%m%d\", out_fmt=\"%Y%m%d\") %}\n {% set end_date_str = start_date_str if end_date_str is none else end_date_str %}\n\n {% set start_date = convert_datetime(start_date_str, in_fmt) %}\n {% set end_date = 
convert_datetime(end_date_str, in_fmt) %}\n\n {% set day_count = (end_date - start_date).days %}\n {% if day_count < 0 %}\n {% set msg -%}\n Partiton start date is after the end date ({{ start_date }}, {{ end_date }})\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg, model) }}\n {% endif %}\n\n {% set date_list = [] %}\n {% for i in range(0, day_count + 1) %}\n {% set the_date = (modules.datetime.timedelta(days=i) + start_date) %}\n {% if not out_fmt %}\n {% set _ = date_list.append(the_date) %}\n {% else %}\n {% set _ = date_list.append(the_date.strftime(out_fmt)) %}\n {% endif %}\n {% endfor %}\n\n {{ return(date_list) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.convert_datetime"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.310411, + "supported_languages": null + }, + "macro.dbt.partition_range": { + "name": "partition_range", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/etc/datetime.sql", + "original_file_path": "macros/etc/datetime.sql", + "unique_id": "macro.dbt.partition_range", + "macro_sql": "{% macro partition_range(raw_partition_date, date_fmt='%Y%m%d') %}\n {% set partition_range = (raw_partition_date | string).split(\",\") %}\n\n {% if (partition_range | length) == 1 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = none %}\n {% elif (partition_range | length) == 2 %}\n {% set start_date = partition_range[0] %}\n {% set end_date = partition_range[1] %}\n {% else %}\n {{ exceptions.raise_compiler_error(\"Invalid partition time. Expected format: {Start Date}[,{End Date}]. Got: \" ~ raw_partition_date) }}\n {% endif %}\n\n {{ return(dates_in_range(start_date, end_date, in_fmt=date_fmt)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.dates_in_range"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.311191, + "supported_languages": null + }, + "macro.dbt.py_current_timestring": { + "name": "py_current_timestring", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/etc/datetime.sql", + "original_file_path": "macros/etc/datetime.sql", + "unique_id": "macro.dbt.py_current_timestring", + "macro_sql": "{% macro py_current_timestring() %}\n {% set dt = modules.datetime.datetime.now() %}\n {% do return(dt.strftime(\"%Y%m%d%H%M%S%f\")) %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.311577, + "supported_languages": null + }, + "macro.dbt.except": { + "name": "except", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/except.sql", + "original_file_path": "macros/utils/except.sql", + "unique_id": "macro.dbt.except", + "macro_sql": "{% macro except() %}\n {{ return(adapter.dispatch('except', 'dbt')()) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__except"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.311944, + "supported_languages": null + }, + "macro.dbt.default__except": { + "name": "default__except", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/except.sql", + "original_file_path": "macros/utils/except.sql", + "unique_id": 
"macro.dbt.default__except", + "macro_sql": "{% macro default__except() %}\n\n except\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.312046, + "supported_languages": null + }, + "macro.dbt.replace": { + "name": "replace", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/replace.sql", + "original_file_path": "macros/utils/replace.sql", + "unique_id": "macro.dbt.replace", + "macro_sql": "{% macro replace(field, old_chars, new_chars) -%}\n {{ return(adapter.dispatch('replace', 'dbt') (field, old_chars, new_chars)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__replace"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3124878, + "supported_languages": null + }, + "macro.dbt.default__replace": { + "name": "default__replace", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/replace.sql", + "original_file_path": "macros/utils/replace.sql", + "unique_id": "macro.dbt.default__replace", + "macro_sql": "{% macro default__replace(field, old_chars, new_chars) %}\n\n replace(\n {{ field }},\n {{ old_chars }},\n {{ new_chars }}\n )\n\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.312692, + "supported_languages": null + }, + "macro.dbt.concat": { + "name": "concat", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/concat.sql", + "original_file_path": "macros/utils/concat.sql", + "unique_id": "macro.dbt.concat", + "macro_sql": "{% macro concat(fields) -%}\n {{ return(adapter.dispatch('concat', 'dbt')(fields)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__concat"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.312974, + "supported_languages": null + }, + "macro.dbt.default__concat": { + "name": "default__concat", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/concat.sql", + "original_file_path": "macros/utils/concat.sql", + "unique_id": "macro.dbt.default__concat", + "macro_sql": "{% macro default__concat(fields) -%}\n {{ fields|join(' || ') }}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.313113, + "supported_languages": null + }, + "macro.dbt.length": { + "name": "length", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/length.sql", + "original_file_path": "macros/utils/length.sql", + "unique_id": "macro.dbt.length", + "macro_sql": "{% macro length(expression) -%}\n {{ return(adapter.dispatch('length', 'dbt') (expression)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__length"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3133912, + "supported_languages": null + }, + "macro.dbt.default__length": { + "name": "default__length", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/length.sql", 
+ "original_file_path": "macros/utils/length.sql", + "unique_id": "macro.dbt.default__length", + "macro_sql": "{% macro default__length(expression) %}\n\n length(\n {{ expression }}\n )\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.313511, + "supported_languages": null + }, + "macro.dbt.dateadd": { + "name": "dateadd", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/dateadd.sql", + "original_file_path": "macros/utils/dateadd.sql", + "unique_id": "macro.dbt.dateadd", + "macro_sql": "{% macro dateadd(datepart, interval, from_date_or_timestamp) %}\n {{ return(adapter.dispatch('dateadd', 'dbt')(datepart, interval, from_date_or_timestamp)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__dateadd"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3138719, + "supported_languages": null + }, + "macro.dbt.default__dateadd": { + "name": "default__dateadd", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/dateadd.sql", + "original_file_path": "macros/utils/dateadd.sql", + "unique_id": "macro.dbt.default__dateadd", + "macro_sql": "{% macro default__dateadd(datepart, interval, from_date_or_timestamp) %}\n\n dateadd(\n {{ datepart }},\n {{ interval }},\n {{ from_date_or_timestamp }}\n )\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.314044, + "supported_languages": null + }, + "macro.dbt.intersect": { + "name": "intersect", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/intersect.sql", + "original_file_path": "macros/utils/intersect.sql", + "unique_id": "macro.dbt.intersect", + "macro_sql": "{% macro intersect() %}\n {{ return(adapter.dispatch('intersect', 'dbt')()) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__intersect"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.314284, + "supported_languages": null + }, + "macro.dbt.default__intersect": { + "name": "default__intersect", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/intersect.sql", + "original_file_path": "macros/utils/intersect.sql", + "unique_id": "macro.dbt.default__intersect", + "macro_sql": "{% macro default__intersect() %}\n\n intersect\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.314375, + "supported_languages": null + }, + "macro.dbt.escape_single_quotes": { + "name": "escape_single_quotes", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/escape_single_quotes.sql", + "original_file_path": "macros/utils/escape_single_quotes.sql", + "unique_id": "macro.dbt.escape_single_quotes", + "macro_sql": "{% macro escape_single_quotes(expression) %}\n {{ return(adapter.dispatch('escape_single_quotes', 'dbt') (expression)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__escape_single_quotes"] + }, + "description": "", + "meta": {}, + "docs": { "show": 
true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3146691, + "supported_languages": null + }, + "macro.dbt.default__escape_single_quotes": { + "name": "default__escape_single_quotes", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/escape_single_quotes.sql", + "original_file_path": "macros/utils/escape_single_quotes.sql", + "unique_id": "macro.dbt.default__escape_single_quotes", + "macro_sql": "{% macro default__escape_single_quotes(expression) -%}\n{{ expression | replace(\"'\",\"''\") }}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.31482, + "supported_languages": null + }, + "macro.dbt.right": { + "name": "right", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/right.sql", + "original_file_path": "macros/utils/right.sql", + "unique_id": "macro.dbt.right", + "macro_sql": "{% macro right(string_text, length_expression) -%}\n {{ return(adapter.dispatch('right', 'dbt') (string_text, length_expression)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_snowflake.snowflake__right"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.315131, + "supported_languages": null + }, + "macro.dbt.default__right": { + "name": "default__right", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/right.sql", + "original_file_path": "macros/utils/right.sql", + "unique_id": "macro.dbt.default__right", + "macro_sql": "{% macro default__right(string_text, length_expression) %}\n\n right(\n {{ string_text }},\n {{ length_expression }}\n )\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3152819, + "supported_languages": null + }, + "macro.dbt.listagg": { + "name": "listagg", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/listagg.sql", + "original_file_path": "macros/utils/listagg.sql", + "unique_id": "macro.dbt.listagg", + "macro_sql": "{% macro listagg(measure, delimiter_text=\"','\", order_by_clause=none, limit_num=none) -%}\n {{ return(adapter.dispatch('listagg', 'dbt') (measure, delimiter_text, order_by_clause, limit_num)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__listagg"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.315952, + "supported_languages": null + }, + "macro.dbt.default__listagg": { + "name": "default__listagg", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/listagg.sql", + "original_file_path": "macros/utils/listagg.sql", + "unique_id": "macro.dbt.default__listagg", + "macro_sql": "{% macro default__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}\n\n {% if limit_num -%}\n array_to_string(\n array_slice(\n array_agg(\n {{ measure }}\n ){% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n ,0\n ,{{ limit_num }}\n ),\n {{ delimiter_text }}\n )\n {%- else %}\n listagg(\n {{ measure }},\n {{ delimiter_text }}\n )\n {% if order_by_clause -%}\n within group ({{ order_by_clause }})\n {%- endif %}\n {%- endif 
%}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3163788, + "supported_languages": null + }, + "macro.dbt.datediff": { + "name": "datediff", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/datediff.sql", + "original_file_path": "macros/utils/datediff.sql", + "unique_id": "macro.dbt.datediff", + "macro_sql": "{% macro datediff(first_date, second_date, datepart) %}\n {{ return(adapter.dispatch('datediff', 'dbt')(first_date, second_date, datepart)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__datediff"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.316732, + "supported_languages": null + }, + "macro.dbt.default__datediff": { + "name": "default__datediff", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/datediff.sql", + "original_file_path": "macros/utils/datediff.sql", + "unique_id": "macro.dbt.default__datediff", + "macro_sql": "{% macro default__datediff(first_date, second_date, datepart) -%}\n\n datediff(\n {{ datepart }},\n {{ first_date }},\n {{ second_date }}\n )\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.316906, + "supported_languages": null + }, + "macro.dbt.safe_cast": { + "name": "safe_cast", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/safe_cast.sql", + "original_file_path": "macros/utils/safe_cast.sql", + "unique_id": "macro.dbt.safe_cast", + "macro_sql": "{% macro safe_cast(field, type) %}\n {{ return(adapter.dispatch('safe_cast', 'dbt') (field, type)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_snowflake.snowflake__safe_cast"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.317203, + "supported_languages": null + }, + "macro.dbt.default__safe_cast": { + "name": "default__safe_cast", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/safe_cast.sql", + "original_file_path": "macros/utils/safe_cast.sql", + "unique_id": "macro.dbt.default__safe_cast", + "macro_sql": "{% macro default__safe_cast(field, type) %}\n {# most databases don't support this function yet\n so we just need to use cast #}\n cast({{field}} as {{type}})\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.317454, + "supported_languages": null + }, + "macro.dbt.hash": { + "name": "hash", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/hash.sql", + "original_file_path": "macros/utils/hash.sql", + "unique_id": "macro.dbt.hash", + "macro_sql": "{% macro hash(field) -%}\n {{ return(adapter.dispatch('hash', 'dbt') (field)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__hash"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.317724, + "supported_languages": null + }, + "macro.dbt.default__hash": { + "name": "default__hash", + 
"resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/hash.sql", + "original_file_path": "macros/utils/hash.sql", + "unique_id": "macro.dbt.default__hash", + "macro_sql": "{% macro default__hash(field) -%}\n md5(cast({{ field }} as {{ api.Column.translate_type('string') }}))\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.31789, + "supported_languages": null + }, + "macro.dbt.cast_bool_to_text": { + "name": "cast_bool_to_text", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/cast_bool_to_text.sql", + "original_file_path": "macros/utils/cast_bool_to_text.sql", + "unique_id": "macro.dbt.cast_bool_to_text", + "macro_sql": "{% macro cast_bool_to_text(field) %}\n {{ adapter.dispatch('cast_bool_to_text', 'dbt') (field) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__cast_bool_to_text"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.318143, + "supported_languages": null + }, + "macro.dbt.default__cast_bool_to_text": { + "name": "default__cast_bool_to_text", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/cast_bool_to_text.sql", + "original_file_path": "macros/utils/cast_bool_to_text.sql", + "unique_id": "macro.dbt.default__cast_bool_to_text", + "macro_sql": "{% macro default__cast_bool_to_text(field) %}\n cast({{ field }} as {{ api.Column.translate_type('string') }})\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.318312, + "supported_languages": null + }, + "macro.dbt.any_value": { + "name": "any_value", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/any_value.sql", + "original_file_path": "macros/utils/any_value.sql", + "unique_id": "macro.dbt.any_value", + "macro_sql": "{% macro any_value(expression) -%}\n {{ return(adapter.dispatch('any_value', 'dbt') (expression)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__any_value"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.318571, + "supported_languages": null + }, + "macro.dbt.default__any_value": { + "name": "default__any_value", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/any_value.sql", + "original_file_path": "macros/utils/any_value.sql", + "unique_id": "macro.dbt.default__any_value", + "macro_sql": "{% macro default__any_value(expression) -%}\n\n any_value({{ expression }})\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.318678, + "supported_languages": null + }, + "macro.dbt.position": { + "name": "position", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/position.sql", + "original_file_path": "macros/utils/position.sql", + "unique_id": "macro.dbt.position", + "macro_sql": "{% macro position(substring_text, string_text) -%}\n {{ return(adapter.dispatch('position', 'dbt') (substring_text, string_text)) }}\n{% endmacro %}", + "depends_on": { "macros": 
["macro.dbt.default__position"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.318977, + "supported_languages": null + }, + "macro.dbt.default__position": { + "name": "default__position", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/position.sql", + "original_file_path": "macros/utils/position.sql", + "unique_id": "macro.dbt.default__position", + "macro_sql": "{% macro default__position(substring_text, string_text) %}\n\n position(\n {{ substring_text }} in {{ string_text }}\n )\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.319123, + "supported_languages": null + }, + "macro.dbt.string_literal": { + "name": "string_literal", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/literal.sql", + "original_file_path": "macros/utils/literal.sql", + "unique_id": "macro.dbt.string_literal", + "macro_sql": "{%- macro string_literal(value) -%}\n {{ return(adapter.dispatch('string_literal', 'dbt') (value)) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.default__string_literal"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.319378, + "supported_languages": null + }, + "macro.dbt.default__string_literal": { + "name": "default__string_literal", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/literal.sql", + "original_file_path": "macros/utils/literal.sql", + "unique_id": "macro.dbt.default__string_literal", + "macro_sql": "{% macro default__string_literal(value) -%}\n '{{ value }}'\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.319487, + "supported_languages": null + }, + "macro.dbt.type_string": { + "name": "type_string", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.type_string", + "macro_sql": "\n\n{%- macro type_string() -%}\n {{ return(adapter.dispatch('type_string', 'dbt')()) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.default__type_string"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.320469, + "supported_languages": null + }, + "macro.dbt.default__type_string": { + "name": "default__type_string", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.default__type_string", + "macro_sql": "{% macro default__type_string() %}\n {{ return(api.Column.translate_type(\"string\")) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.320639, + "supported_languages": null + }, + "macro.dbt.type_timestamp": { + "name": "type_timestamp", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": 
"macros/utils/data_types.sql", + "unique_id": "macro.dbt.type_timestamp", + "macro_sql": "\n\n{%- macro type_timestamp() -%}\n {{ return(adapter.dispatch('type_timestamp', 'dbt')()) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.default__type_timestamp"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3208199, + "supported_languages": null + }, + "macro.dbt.default__type_timestamp": { + "name": "default__type_timestamp", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.default__type_timestamp", + "macro_sql": "{% macro default__type_timestamp() %}\n {{ return(api.Column.translate_type(\"timestamp\")) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.320991, + "supported_languages": null + }, + "macro.dbt.type_float": { + "name": "type_float", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.type_float", + "macro_sql": "\n\n{%- macro type_float() -%}\n {{ return(adapter.dispatch('type_float', 'dbt')()) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.default__type_float"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.321173, + "supported_languages": null + }, + "macro.dbt.default__type_float": { + "name": "default__type_float", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.default__type_float", + "macro_sql": "{% macro default__type_float() %}\n {{ return(api.Column.translate_type(\"float\")) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.321439, + "supported_languages": null + }, + "macro.dbt.type_numeric": { + "name": "type_numeric", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.type_numeric", + "macro_sql": "\n\n{%- macro type_numeric() -%}\n {{ return(adapter.dispatch('type_numeric', 'dbt')()) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.default__type_numeric"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.321758, + "supported_languages": null + }, + "macro.dbt.default__type_numeric": { + "name": "default__type_numeric", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.default__type_numeric", + "macro_sql": "{% macro default__type_numeric() %}\n {{ return(api.Column.numeric_type(\"numeric\", 28, 6)) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + 
"created_at": 1705588676.321972, + "supported_languages": null + }, + "macro.dbt.type_bigint": { + "name": "type_bigint", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.type_bigint", + "macro_sql": "\n\n{%- macro type_bigint() -%}\n {{ return(adapter.dispatch('type_bigint', 'dbt')()) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.default__type_bigint"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.322144, + "supported_languages": null + }, + "macro.dbt.default__type_bigint": { + "name": "default__type_bigint", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.default__type_bigint", + "macro_sql": "{% macro default__type_bigint() %}\n {{ return(api.Column.translate_type(\"bigint\")) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.32231, + "supported_languages": null + }, + "macro.dbt.type_int": { + "name": "type_int", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.type_int", + "macro_sql": "\n\n{%- macro type_int() -%}\n {{ return(adapter.dispatch('type_int', 'dbt')()) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.default__type_int"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3225799, + "supported_languages": null + }, + "macro.dbt.default__type_int": { + "name": "default__type_int", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.default__type_int", + "macro_sql": "{%- macro default__type_int() -%}\n {{ return(api.Column.translate_type(\"integer\")) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.32274, + "supported_languages": null + }, + "macro.dbt.type_boolean": { + "name": "type_boolean", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.type_boolean", + "macro_sql": "\n\n{%- macro type_boolean() -%}\n {{ return(adapter.dispatch('type_boolean', 'dbt')()) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.default__type_boolean"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.322921, + "supported_languages": null + }, + "macro.dbt.default__type_boolean": { + "name": "default__type_boolean", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/data_types.sql", + "original_file_path": "macros/utils/data_types.sql", + "unique_id": "macro.dbt.default__type_boolean", + "macro_sql": "{%- macro default__type_boolean() -%}\n {{ 
return(api.Column.translate_type(\"boolean\")) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.323076, + "supported_languages": null + }, + "macro.dbt.array_concat": { + "name": "array_concat", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/array_concat.sql", + "original_file_path": "macros/utils/array_concat.sql", + "unique_id": "macro.dbt.array_concat", + "macro_sql": "{% macro array_concat(array_1, array_2) -%}\n {{ return(adapter.dispatch('array_concat', 'dbt')(array_1, array_2)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__array_concat"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.323393, + "supported_languages": null + }, + "macro.dbt.default__array_concat": { + "name": "default__array_concat", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/array_concat.sql", + "original_file_path": "macros/utils/array_concat.sql", + "unique_id": "macro.dbt.default__array_concat", + "macro_sql": "{% macro default__array_concat(array_1, array_2) -%}\n array_cat({{ array_1 }}, {{ array_2 }})\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.323528, + "supported_languages": null + }, + "macro.dbt.bool_or": { + "name": "bool_or", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/bool_or.sql", + "original_file_path": "macros/utils/bool_or.sql", + "unique_id": "macro.dbt.bool_or", + "macro_sql": "{% macro bool_or(expression) -%}\n {{ return(adapter.dispatch('bool_or', 'dbt') (expression)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_snowflake.snowflake__bool_or"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.323793, + "supported_languages": null + }, + "macro.dbt.default__bool_or": { + "name": "default__bool_or", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/bool_or.sql", + "original_file_path": "macros/utils/bool_or.sql", + "unique_id": "macro.dbt.default__bool_or", + "macro_sql": "{% macro default__bool_or(expression) -%}\n\n bool_or({{ expression }})\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.323909, + "supported_languages": null + }, + "macro.dbt.last_day": { + "name": "last_day", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/last_day.sql", + "original_file_path": "macros/utils/last_day.sql", + "unique_id": "macro.dbt.last_day", + "macro_sql": "{% macro last_day(date, datepart) %}\n {{ return(adapter.dispatch('last_day', 'dbt') (date, datepart)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__last_day"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3242729, + "supported_languages": null + }, + "macro.dbt.default_last_day": { + "name": "default_last_day", + "resource_type": "macro", + "package_name": 
"dbt", + "path": "macros/utils/last_day.sql", + "original_file_path": "macros/utils/last_day.sql", + "unique_id": "macro.dbt.default_last_day", + "macro_sql": "\n\n{%- macro default_last_day(date, datepart) -%}\n cast(\n {{dbt.dateadd('day', '-1',\n dbt.dateadd(datepart, '1', dbt.date_trunc(datepart, date))\n )}}\n as date)\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.324553, + "supported_languages": null + }, + "macro.dbt.default__last_day": { + "name": "default__last_day", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/last_day.sql", + "original_file_path": "macros/utils/last_day.sql", + "unique_id": "macro.dbt.default__last_day", + "macro_sql": "{% macro default__last_day(date, datepart) -%}\n {{dbt.default_last_day(date, datepart)}}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default_last_day"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.324708, + "supported_languages": null + }, + "macro.dbt.split_part": { + "name": "split_part", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/split_part.sql", + "original_file_path": "macros/utils/split_part.sql", + "unique_id": "macro.dbt.split_part", + "macro_sql": "{% macro split_part(string_text, delimiter_text, part_number) %}\n {{ return(adapter.dispatch('split_part', 'dbt') (string_text, delimiter_text, part_number)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__split_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.325262, + "supported_languages": null + }, + "macro.dbt.default__split_part": { + "name": "default__split_part", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/split_part.sql", + "original_file_path": "macros/utils/split_part.sql", + "unique_id": "macro.dbt.default__split_part", + "macro_sql": "{% macro default__split_part(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n {{ part_number }}\n )\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.325439, + "supported_languages": null + }, + "macro.dbt._split_part_negative": { + "name": "_split_part_negative", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/split_part.sql", + "original_file_path": "macros/utils/split_part.sql", + "unique_id": "macro.dbt._split_part_negative", + "macro_sql": "{% macro _split_part_negative(string_text, delimiter_text, part_number) %}\n\n split_part(\n {{ string_text }},\n {{ delimiter_text }},\n length({{ string_text }})\n - length(\n replace({{ string_text }}, {{ delimiter_text }}, '')\n ) + 2 + {{ part_number }}\n )\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.325681, + "supported_languages": null + }, + "macro.dbt.date_trunc": { + "name": "date_trunc", + "resource_type": "macro", + "package_name": "dbt", + 
"path": "macros/utils/date_trunc.sql", + "original_file_path": "macros/utils/date_trunc.sql", + "unique_id": "macro.dbt.date_trunc", + "macro_sql": "{% macro date_trunc(datepart, date) -%}\n {{ return(adapter.dispatch('date_trunc', 'dbt') (datepart, date)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__date_trunc"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.325967, + "supported_languages": null + }, + "macro.dbt.default__date_trunc": { + "name": "default__date_trunc", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/date_trunc.sql", + "original_file_path": "macros/utils/date_trunc.sql", + "unique_id": "macro.dbt.default__date_trunc", + "macro_sql": "{% macro default__date_trunc(datepart, date) -%}\n date_trunc('{{datepart}}', {{date}})\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.326105, + "supported_languages": null + }, + "macro.dbt.array_construct": { + "name": "array_construct", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/array_construct.sql", + "original_file_path": "macros/utils/array_construct.sql", + "unique_id": "macro.dbt.array_construct", + "macro_sql": "{% macro array_construct(inputs=[], data_type=api.Column.translate_type('integer')) -%}\n {{ return(adapter.dispatch('array_construct', 'dbt')(inputs, data_type)) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__array_construct"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.326493, + "supported_languages": null + }, + "macro.dbt.default__array_construct": { + "name": "default__array_construct", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/array_construct.sql", + "original_file_path": "macros/utils/array_construct.sql", + "unique_id": "macro.dbt.default__array_construct", + "macro_sql": "{% macro default__array_construct(inputs, data_type) -%}\n {% if inputs|length > 0 %}\n array[ {{ inputs|join(' , ') }} ]\n {% else %}\n array[]::{{data_type}}[]\n {% endif %}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.326761, + "supported_languages": null + }, + "macro.dbt.array_append": { + "name": "array_append", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/array_append.sql", + "original_file_path": "macros/utils/array_append.sql", + "unique_id": "macro.dbt.array_append", + "macro_sql": "{% macro array_append(array, new_element) -%}\n {{ return(adapter.dispatch('array_append', 'dbt')(array, new_element)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__array_append"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.327052, + "supported_languages": null + }, + "macro.dbt.default__array_append": { + "name": "default__array_append", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/utils/array_append.sql", + "original_file_path": "macros/utils/array_append.sql", + "unique_id": 
"macro.dbt.default__array_append", + "macro_sql": "{% macro default__array_append(array, new_element) -%}\n array_append({{ array }}, {{ new_element }})\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.327195, + "supported_languages": null + }, + "macro.dbt.create_schema": { + "name": "create_schema", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/schema.sql", + "original_file_path": "macros/adapters/schema.sql", + "unique_id": "macro.dbt.create_schema", + "macro_sql": "{% macro create_schema(relation) -%}\n {{ adapter.dispatch('create_schema', 'dbt')(relation) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__create_schema"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.327583, + "supported_languages": null + }, + "macro.dbt.default__create_schema": { + "name": "default__create_schema", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/schema.sql", + "original_file_path": "macros/adapters/schema.sql", + "unique_id": "macro.dbt.default__create_schema", + "macro_sql": "{% macro default__create_schema(relation) -%}\n {%- call statement('create_schema') -%}\n create schema if not exists {{ relation.without_identifier() }}\n {% endcall %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.327782, + "supported_languages": null + }, + "macro.dbt.drop_schema": { + "name": "drop_schema", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/schema.sql", + "original_file_path": "macros/adapters/schema.sql", + "unique_id": "macro.dbt.drop_schema", + "macro_sql": "{% macro drop_schema(relation) -%}\n {{ adapter.dispatch('drop_schema', 'dbt')(relation) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__drop_schema"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.327947, + "supported_languages": null + }, + "macro.dbt.default__drop_schema": { + "name": "default__drop_schema", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/schema.sql", + "original_file_path": "macros/adapters/schema.sql", + "unique_id": "macro.dbt.default__drop_schema", + "macro_sql": "{% macro default__drop_schema(relation) -%}\n {%- call statement('drop_schema') -%}\n drop schema if exists {{ relation.without_identifier() }} cascade\n {% endcall %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.328223, + "supported_languages": null + }, + "macro.dbt.current_timestamp": { + "name": "current_timestamp", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/timestamps.sql", + "original_file_path": "macros/adapters/timestamps.sql", + "unique_id": "macro.dbt.current_timestamp", + "macro_sql": "{%- macro current_timestamp() -%}\n {{ adapter.dispatch('current_timestamp', 'dbt')() }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": 
["macro.dbt_snowflake.snowflake__current_timestamp"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.328723, + "supported_languages": null + }, + "macro.dbt.default__current_timestamp": { + "name": "default__current_timestamp", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/timestamps.sql", + "original_file_path": "macros/adapters/timestamps.sql", + "unique_id": "macro.dbt.default__current_timestamp", + "macro_sql": "{% macro default__current_timestamp() -%}\n {{ exceptions.raise_not_implemented(\n 'current_timestamp macro not implemented for adapter ' + adapter.type()) }}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.328883, + "supported_languages": null + }, + "macro.dbt.snapshot_get_time": { + "name": "snapshot_get_time", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/timestamps.sql", + "original_file_path": "macros/adapters/timestamps.sql", + "unique_id": "macro.dbt.snapshot_get_time", + "macro_sql": "\n\n{%- macro snapshot_get_time() -%}\n {{ adapter.dispatch('snapshot_get_time', 'dbt')() }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__snapshot_get_time"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.329036, + "supported_languages": null + }, + "macro.dbt.default__snapshot_get_time": { + "name": "default__snapshot_get_time", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/timestamps.sql", + "original_file_path": "macros/adapters/timestamps.sql", + "unique_id": "macro.dbt.default__snapshot_get_time", + "macro_sql": "{% macro default__snapshot_get_time() %}\n {{ current_timestamp() }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.current_timestamp"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.329152, + "supported_languages": null + }, + "macro.dbt.current_timestamp_backcompat": { + "name": "current_timestamp_backcompat", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/timestamps.sql", + "original_file_path": "macros/adapters/timestamps.sql", + "unique_id": "macro.dbt.current_timestamp_backcompat", + "macro_sql": "{% macro current_timestamp_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.snowflake__current_timestamp_backcompat" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.32933, + "supported_languages": null + }, + "macro.dbt.default__current_timestamp_backcompat": { + "name": "default__current_timestamp_backcompat", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/timestamps.sql", + "original_file_path": "macros/adapters/timestamps.sql", + "unique_id": "macro.dbt.default__current_timestamp_backcompat", + "macro_sql": "{% macro default__current_timestamp_backcompat() %}\n current_timestamp::timestamp\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + 
"meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.329411, + "supported_languages": null + }, + "macro.dbt.current_timestamp_in_utc_backcompat": { + "name": "current_timestamp_in_utc_backcompat", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/timestamps.sql", + "original_file_path": "macros/adapters/timestamps.sql", + "unique_id": "macro.dbt.current_timestamp_in_utc_backcompat", + "macro_sql": "{% macro current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_in_utc_backcompat', 'dbt')()) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.snowflake__current_timestamp_in_utc_backcompat" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3295841, + "supported_languages": null + }, + "macro.dbt.default__current_timestamp_in_utc_backcompat": { + "name": "default__current_timestamp_in_utc_backcompat", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/timestamps.sql", + "original_file_path": "macros/adapters/timestamps.sql", + "unique_id": "macro.dbt.default__current_timestamp_in_utc_backcompat", + "macro_sql": "{% macro default__current_timestamp_in_utc_backcompat() %}\n {{ return(adapter.dispatch('current_timestamp_backcompat', 'dbt')()) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.current_timestamp_backcompat", + "macro.dbt_snowflake.snowflake__current_timestamp_backcompat" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.329758, + "supported_languages": null + }, + "macro.dbt.get_create_index_sql": { + "name": "get_create_index_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/indexes.sql", + "original_file_path": "macros/adapters/indexes.sql", + "unique_id": "macro.dbt.get_create_index_sql", + "macro_sql": "{% macro get_create_index_sql(relation, index_dict) -%}\n {{ return(adapter.dispatch('get_create_index_sql', 'dbt')(relation, index_dict)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_create_index_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.330629, + "supported_languages": null + }, + "macro.dbt.default__get_create_index_sql": { + "name": "default__get_create_index_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/indexes.sql", + "original_file_path": "macros/adapters/indexes.sql", + "unique_id": "macro.dbt.default__get_create_index_sql", + "macro_sql": "{% macro default__get_create_index_sql(relation, index_dict) -%}\n {% do return(None) %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3307788, + "supported_languages": null + }, + "macro.dbt.create_indexes": { + "name": "create_indexes", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/indexes.sql", + "original_file_path": "macros/adapters/indexes.sql", + "unique_id": "macro.dbt.create_indexes", + "macro_sql": "{% macro create_indexes(relation) -%}\n {{ adapter.dispatch('create_indexes', 'dbt')(relation) 
}}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__create_indexes"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3309429, + "supported_languages": null + }, + "macro.dbt.default__create_indexes": { + "name": "default__create_indexes", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/indexes.sql", + "original_file_path": "macros/adapters/indexes.sql", + "unique_id": "macro.dbt.default__create_indexes", + "macro_sql": "{% macro default__create_indexes(relation) -%}\n {%- set _indexes = config.get('indexes', default=[]) -%}\n\n {% for _index_dict in _indexes %}\n {% set create_index_sql = get_create_index_sql(relation, _index_dict) %}\n {% if create_index_sql %}\n {% do run_query(create_index_sql) %}\n {% endif %}\n {% endfor %}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.get_create_index_sql", "macro.dbt.run_query"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.331454, + "supported_languages": null + }, + "macro.dbt.get_drop_index_sql": { + "name": "get_drop_index_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/indexes.sql", + "original_file_path": "macros/adapters/indexes.sql", + "unique_id": "macro.dbt.get_drop_index_sql", + "macro_sql": "{% macro get_drop_index_sql(relation, index_name) -%}\n {{ adapter.dispatch('get_drop_index_sql', 'dbt')(relation, index_name) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_drop_index_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.331796, + "supported_languages": null + }, + "macro.dbt.default__get_drop_index_sql": { + "name": "default__get_drop_index_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/indexes.sql", + "original_file_path": "macros/adapters/indexes.sql", + "unique_id": "macro.dbt.default__get_drop_index_sql", + "macro_sql": "{% macro default__get_drop_index_sql(relation, index_name) -%}\n {{ exceptions.raise_compiler_error(\"`get_drop_index_sql has not been implemented for this adapter.\") }}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3319612, + "supported_languages": null + }, + "macro.dbt.get_show_indexes_sql": { + "name": "get_show_indexes_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/indexes.sql", + "original_file_path": "macros/adapters/indexes.sql", + "unique_id": "macro.dbt.get_show_indexes_sql", + "macro_sql": "{% macro get_show_indexes_sql(relation) -%}\n {{ adapter.dispatch('get_show_indexes_sql', 'dbt')(relation) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_show_indexes_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.332129, + "supported_languages": null + }, + "macro.dbt.default__get_show_indexes_sql": { + "name": "default__get_show_indexes_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/indexes.sql", + "original_file_path": "macros/adapters/indexes.sql", + 
"unique_id": "macro.dbt.default__get_show_indexes_sql", + "macro_sql": "{% macro default__get_show_indexes_sql(relation) -%}\n {{ exceptions.raise_compiler_error(\"`get_show_indexes_sql has not been implemented for this adapter.\") }}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.332265, + "supported_languages": null + }, + "macro.dbt.make_intermediate_relation": { + "name": "make_intermediate_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.make_intermediate_relation", + "macro_sql": "{% macro make_intermediate_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_intermediate_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__make_intermediate_relation"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3351529, + "supported_languages": null + }, + "macro.dbt.default__make_intermediate_relation": { + "name": "default__make_intermediate_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.default__make_intermediate_relation", + "macro_sql": "{% macro default__make_intermediate_relation(base_relation, suffix) %}\n {{ return(default__make_temp_relation(base_relation, suffix)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__make_temp_relation"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.335344, + "supported_languages": null + }, + "macro.dbt.make_temp_relation": { + "name": "make_temp_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.make_temp_relation", + "macro_sql": "{% macro make_temp_relation(base_relation, suffix='__dbt_tmp') %}\n {{ return(adapter.dispatch('make_temp_relation', 'dbt')(base_relation, suffix)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__make_temp_relation"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3355699, + "supported_languages": null + }, + "macro.dbt.default__make_temp_relation": { + "name": "default__make_temp_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.default__make_temp_relation", + "macro_sql": "{% macro default__make_temp_relation(base_relation, suffix) %}\n {%- set temp_identifier = base_relation.identifier ~ suffix -%}\n {%- set temp_relation = base_relation.incorporate(\n path={\"identifier\": temp_identifier}) -%}\n\n {{ return(temp_relation) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3359869, + "supported_languages": null + }, + 
"macro.dbt.make_backup_relation": { + "name": "make_backup_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.make_backup_relation", + "macro_sql": "{% macro make_backup_relation(base_relation, backup_relation_type, suffix='__dbt_backup') %}\n {{ return(adapter.dispatch('make_backup_relation', 'dbt')(base_relation, backup_relation_type, suffix)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__make_backup_relation"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.33624, + "supported_languages": null + }, + "macro.dbt.default__make_backup_relation": { + "name": "default__make_backup_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.default__make_backup_relation", + "macro_sql": "{% macro default__make_backup_relation(base_relation, backup_relation_type, suffix) %}\n {%- set backup_identifier = base_relation.identifier ~ suffix -%}\n {%- set backup_relation = base_relation.incorporate(\n path={\"identifier\": backup_identifier},\n type=backup_relation_type\n ) -%}\n {{ return(backup_relation) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.33659, + "supported_languages": null + }, + "macro.dbt.truncate_relation": { + "name": "truncate_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.truncate_relation", + "macro_sql": "{% macro truncate_relation(relation) -%}\n {{ return(adapter.dispatch('truncate_relation', 'dbt')(relation)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__truncate_relation"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.336851, + "supported_languages": null + }, + "macro.dbt.default__truncate_relation": { + "name": "default__truncate_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.default__truncate_relation", + "macro_sql": "{% macro default__truncate_relation(relation) -%}\n {% call statement('truncate_relation') -%}\n truncate table {{ relation }}\n {%- endcall %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3370528, + "supported_languages": null + }, + "macro.dbt.rename_relation": { + "name": "rename_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.rename_relation", + "macro_sql": "{% macro rename_relation(from_relation, to_relation) -%}\n {{ return(adapter.dispatch('rename_relation', 'dbt')(from_relation, to_relation)) }}\n{% endmacro %}", + "depends_on": { + "macros": 
["macro.dbt_snowflake.snowflake__rename_relation"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.337266, + "supported_languages": null + }, + "macro.dbt.default__rename_relation": { + "name": "default__rename_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.default__rename_relation", + "macro_sql": "{% macro default__rename_relation(from_relation, to_relation) -%}\n {% set target_name = adapter.quote_as_configured(to_relation.identifier, 'identifier') %}\n {% call statement('rename_relation') -%}\n alter table {{ from_relation }} rename to {{ target_name }}\n {%- endcall %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3375611, + "supported_languages": null + }, + "macro.dbt.get_or_create_relation": { + "name": "get_or_create_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.get_or_create_relation", + "macro_sql": "{% macro get_or_create_relation(database, schema, identifier, type) -%}\n {{ return(adapter.dispatch('get_or_create_relation', 'dbt')(database, schema, identifier, type)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_or_create_relation"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.337826, + "supported_languages": null + }, + "macro.dbt.default__get_or_create_relation": { + "name": "default__get_or_create_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.default__get_or_create_relation", + "macro_sql": "{% macro default__get_or_create_relation(database, schema, identifier, type) %}\n {%- set target_relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n\n {% if target_relation %}\n {% do return([true, target_relation]) %}\n {% endif %}\n\n {%- set new_relation = api.Relation.create(\n database=database,\n schema=schema,\n identifier=identifier,\n type=type\n ) -%}\n {% do return([false, new_relation]) %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3384142, + "supported_languages": null + }, + "macro.dbt.load_cached_relation": { + "name": "load_cached_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.load_cached_relation", + "macro_sql": "{% macro load_cached_relation(relation) %}\n {% do return(adapter.get_relation(\n database=relation.database,\n schema=relation.schema,\n identifier=relation.identifier\n )) -%}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.338651, + 
"supported_languages": null + }, + "macro.dbt.load_relation": { + "name": "load_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.load_relation", + "macro_sql": "{% macro load_relation(relation) %}\n {{ return(load_cached_relation(relation)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.load_cached_relation"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.338795, + "supported_languages": null + }, + "macro.dbt.drop_relation_if_exists": { + "name": "drop_relation_if_exists", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/relation.sql", + "original_file_path": "macros/adapters/relation.sql", + "unique_id": "macro.dbt.drop_relation_if_exists", + "macro_sql": "{% macro drop_relation_if_exists(relation) %}\n {% if relation is not none %}\n {{ adapter.drop_relation(relation) }}\n {% endif %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.338994, + "supported_languages": null + }, + "macro.dbt.collect_freshness": { + "name": "collect_freshness", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/freshness.sql", + "original_file_path": "macros/adapters/freshness.sql", + "unique_id": "macro.dbt.collect_freshness", + "macro_sql": "{% macro collect_freshness(source, loaded_at_field, filter) %}\n {{ return(adapter.dispatch('collect_freshness', 'dbt')(source, loaded_at_field, filter))}}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__collect_freshness"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.339426, + "supported_languages": null + }, + "macro.dbt.default__collect_freshness": { + "name": "default__collect_freshness", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/freshness.sql", + "original_file_path": "macros/adapters/freshness.sql", + "unique_id": "macro.dbt.default__collect_freshness", + "macro_sql": "{% macro default__collect_freshness(source, loaded_at_field, filter) %}\n {% call statement('collect_freshness', fetch_result=True, auto_begin=False) -%}\n select\n max({{ loaded_at_field }}) as max_loaded_at,\n {{ current_timestamp() }} as snapshotted_at\n from {{ source }}\n {% if filter %}\n where {{ filter }}\n {% endif %}\n {% endcall %}\n {{ return(load_result('collect_freshness')) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.statement", "macro.dbt.current_timestamp"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3398452, + "supported_languages": null + }, + "macro.dbt.validate_sql": { + "name": "validate_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/validate_sql.sql", + "original_file_path": "macros/adapters/validate_sql.sql", + "unique_id": "macro.dbt.validate_sql", + "macro_sql": "{% macro validate_sql(sql) -%}\n {{ return(adapter.dispatch('validate_sql', 'dbt')(sql)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__validate_sql"] }, + "description": "", + "meta": {}, + 
"docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.340132, + "supported_languages": null + }, + "macro.dbt.default__validate_sql": { + "name": "default__validate_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/validate_sql.sql", + "original_file_path": "macros/adapters/validate_sql.sql", + "unique_id": "macro.dbt.default__validate_sql", + "macro_sql": "{% macro default__validate_sql(sql) -%}\n {% call statement('validate_sql') -%}\n explain {{ sql }}\n {% endcall %}\n {{ return(load_result('validate_sql')) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.340367, + "supported_languages": null + }, + "macro.dbt.copy_grants": { + "name": "copy_grants", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.copy_grants", + "macro_sql": "{% macro copy_grants() %}\n {{ return(adapter.dispatch('copy_grants', 'dbt')()) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__copy_grants"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3422961, + "supported_languages": null + }, + "macro.dbt.default__copy_grants": { + "name": "default__copy_grants", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.default__copy_grants", + "macro_sql": "{% macro default__copy_grants() %}\n {{ return(True) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.342442, + "supported_languages": null + }, + "macro.dbt.support_multiple_grantees_per_dcl_statement": { + "name": "support_multiple_grantees_per_dcl_statement", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.support_multiple_grantees_per_dcl_statement", + "macro_sql": "{% macro support_multiple_grantees_per_dcl_statement() %}\n {{ return(adapter.dispatch('support_multiple_grantees_per_dcl_statement', 'dbt')()) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.snowflake__support_multiple_grantees_per_dcl_statement" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.342637, + "supported_languages": null + }, + "macro.dbt.default__support_multiple_grantees_per_dcl_statement": { + "name": "default__support_multiple_grantees_per_dcl_statement", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.default__support_multiple_grantees_per_dcl_statement", + "macro_sql": "\n\n{%- macro default__support_multiple_grantees_per_dcl_statement() -%}\n {{ return(True) }}\n{%- endmacro -%}\n\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + 
"docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.342775, + "supported_languages": null + }, + "macro.dbt.should_revoke": { + "name": "should_revoke", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.should_revoke", + "macro_sql": "{% macro should_revoke(existing_relation, full_refresh_mode=True) %}\n\n {% if not existing_relation %}\n {#-- The table doesn't already exist, so no grants to copy over --#}\n {{ return(False) }}\n {% elif full_refresh_mode %}\n {#-- The object is being REPLACED -- whether grants are copied over depends on the value of user config --#}\n {{ return(copy_grants()) }}\n {% else %}\n {#-- The table is being merged/upserted/inserted -- grants will be carried over --#}\n {{ return(True) }}\n {% endif %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.copy_grants"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3431559, + "supported_languages": null + }, + "macro.dbt.get_show_grant_sql": { + "name": "get_show_grant_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.get_show_grant_sql", + "macro_sql": "{% macro get_show_grant_sql(relation) %}\n {{ return(adapter.dispatch(\"get_show_grant_sql\", \"dbt\")(relation)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_show_grant_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3433511, + "supported_languages": null + }, + "macro.dbt.default__get_show_grant_sql": { + "name": "default__get_show_grant_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.default__get_show_grant_sql", + "macro_sql": "{% macro default__get_show_grant_sql(relation) %}\n show grants on {{ relation }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3434591, + "supported_languages": null + }, + "macro.dbt.get_grant_sql": { + "name": "get_grant_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.get_grant_sql", + "macro_sql": "{% macro get_grant_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_grant_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_grant_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.343998, + "supported_languages": null + }, + "macro.dbt.default__get_grant_sql": { + "name": "default__get_grant_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.default__get_grant_sql", + "macro_sql": 
"\n\n{%- macro default__get_grant_sql(relation, privilege, grantees) -%}\n grant {{ privilege }} on {{ relation }} to {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3442051, + "supported_languages": null + }, + "macro.dbt.get_revoke_sql": { + "name": "get_revoke_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.get_revoke_sql", + "macro_sql": "{% macro get_revoke_sql(relation, privilege, grantees) %}\n {{ return(adapter.dispatch('get_revoke_sql', 'dbt')(relation, privilege, grantees)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_revoke_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.34444, + "supported_languages": null + }, + "macro.dbt.default__get_revoke_sql": { + "name": "default__get_revoke_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.default__get_revoke_sql", + "macro_sql": "\n\n{%- macro default__get_revoke_sql(relation, privilege, grantees) -%}\n revoke {{ privilege }} on {{ relation }} from {{ grantees | join(', ') }}\n{%- endmacro -%}\n\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.344633, + "supported_languages": null + }, + "macro.dbt.get_dcl_statement_list": { + "name": "get_dcl_statement_list", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.get_dcl_statement_list", + "macro_sql": "{% macro get_dcl_statement_list(relation, grant_config, get_dcl_macro) %}\n {{ return(adapter.dispatch('get_dcl_statement_list', 'dbt')(relation, grant_config, get_dcl_macro)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_dcl_statement_list"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.344874, + "supported_languages": null + }, + "macro.dbt.default__get_dcl_statement_list": { + "name": "default__get_dcl_statement_list", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.default__get_dcl_statement_list", + "macro_sql": "\n\n{%- macro default__get_dcl_statement_list(relation, grant_config, get_dcl_macro) -%}\n {#\n -- Unpack grant_config into specific privileges and the set of users who need them granted/revoked.\n -- Depending on whether this database supports multiple grantees per statement, pass in the list of\n -- all grantees per privilege, or (if not) template one statement per privilege-grantee pair.\n -- `get_dcl_macro` will be either `get_grant_sql` or `get_revoke_sql`\n #}\n {%- set dcl_statements = [] -%}\n {%- for privilege, grantees in grant_config.items() %}\n {%- if support_multiple_grantees_per_dcl_statement() and grantees -%}\n {%- 
set dcl = get_dcl_macro(relation, privilege, grantees) -%}\n {%- do dcl_statements.append(dcl) -%}\n {%- else -%}\n {%- for grantee in grantees -%}\n {% set dcl = get_dcl_macro(relation, privilege, [grantee]) %}\n {%- do dcl_statements.append(dcl) -%}\n {% endfor -%}\n {%- endif -%}\n {%- endfor -%}\n {{ return(dcl_statements) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt.support_multiple_grantees_per_dcl_statement"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3457081, + "supported_languages": null + }, + "macro.dbt.call_dcl_statements": { + "name": "call_dcl_statements", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.call_dcl_statements", + "macro_sql": "{% macro call_dcl_statements(dcl_statement_list) %}\n {{ return(adapter.dispatch(\"call_dcl_statements\", \"dbt\")(dcl_statement_list)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__call_dcl_statements"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3459258, + "supported_languages": null + }, + "macro.dbt.default__call_dcl_statements": { + "name": "default__call_dcl_statements", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.default__call_dcl_statements", + "macro_sql": "{% macro default__call_dcl_statements(dcl_statement_list) %}\n {#\n -- By default, supply all grant + revoke statements in a single semicolon-separated block,\n -- so that they're all processed together.\n\n -- Some databases do not support this. 
Those adapters will need to override this macro\n -- to run each statement individually.\n #}\n {% call statement('grants') %}\n {% for dcl_statement in dcl_statement_list %}\n {{ dcl_statement }};\n {% endfor %}\n {% endcall %}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.346206, + "supported_languages": null + }, + "macro.dbt.apply_grants": { + "name": "apply_grants", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.apply_grants", + "macro_sql": "{% macro apply_grants(relation, grant_config, should_revoke) %}\n {{ return(adapter.dispatch(\"apply_grants\", \"dbt\")(relation, grant_config, should_revoke)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__apply_grants"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.346453, + "supported_languages": null + }, + "macro.dbt.default__apply_grants": { + "name": "default__apply_grants", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/apply_grants.sql", + "original_file_path": "macros/adapters/apply_grants.sql", + "unique_id": "macro.dbt.default__apply_grants", + "macro_sql": "{% macro default__apply_grants(relation, grant_config, should_revoke=True) %}\n {#-- If grant_config is {} or None, this is a no-op --#}\n {% if grant_config %}\n {% if should_revoke %}\n {#-- We think previous grants may have carried over --#}\n {#-- Show current grants and calculate diffs --#}\n {% set current_grants_table = run_query(get_show_grant_sql(relation)) %}\n {% set current_grants_dict = adapter.standardize_grants_dict(current_grants_table) %}\n {% set needs_granting = diff_of_two_dicts(grant_config, current_grants_dict) %}\n {% set needs_revoking = diff_of_two_dicts(current_grants_dict, grant_config) %}\n {% if not (needs_granting or needs_revoking) %}\n {{ log('On ' ~ relation ~': All grants are in place, no revocation or granting needed.')}}\n {% endif %}\n {% else %}\n {#-- We don't think there's any chance of previous grants having carried over. --#}\n {#-- Jump straight to granting what the user has configured. 
--#}\n {% set needs_revoking = {} %}\n {% set needs_granting = grant_config %}\n {% endif %}\n {% if needs_granting or needs_revoking %}\n {% set revoke_statement_list = get_dcl_statement_list(relation, needs_revoking, get_revoke_sql) %}\n {% set grant_statement_list = get_dcl_statement_list(relation, needs_granting, get_grant_sql) %}\n {% set dcl_statement_list = revoke_statement_list + grant_statement_list %}\n {% if dcl_statement_list %}\n {{ call_dcl_statements(dcl_statement_list) }}\n {% endif %}\n {% endif %}\n {% endif %}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.run_query", + "macro.dbt.get_show_grant_sql", + "macro.dbt.get_dcl_statement_list", + "macro.dbt.call_dcl_statements" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.347722, + "supported_languages": null + }, + "macro.dbt.get_show_sql": { + "name": "get_show_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/show.sql", + "original_file_path": "macros/adapters/show.sql", + "unique_id": "macro.dbt.get_show_sql", + "macro_sql": "{% macro get_show_sql(compiled_code, sql_header, limit) -%}\n {%- if sql_header -%}\n {{ sql_header }}\n {%- endif -%}\n {%- if limit is not none -%}\n {{ get_limit_subquery_sql(compiled_code, limit) }}\n {%- else -%}\n {{ compiled_code }}\n {%- endif -%}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_limit_subquery_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.348275, + "supported_languages": null + }, + "macro.dbt.get_limit_subquery_sql": { + "name": "get_limit_subquery_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/show.sql", + "original_file_path": "macros/adapters/show.sql", + "unique_id": "macro.dbt.get_limit_subquery_sql", + "macro_sql": "{% macro get_limit_subquery_sql(sql, limit) %}\n {{ adapter.dispatch('get_limit_subquery_sql', 'dbt')(sql, limit) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_limit_subquery_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.348473, + "supported_languages": null + }, + "macro.dbt.default__get_limit_subquery_sql": { + "name": "default__get_limit_subquery_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/show.sql", + "original_file_path": "macros/adapters/show.sql", + "unique_id": "macro.dbt.default__get_limit_subquery_sql", + "macro_sql": "{% macro default__get_limit_subquery_sql(sql, limit) %}\n select *\n from (\n {{ sql }}\n ) as model_limit_subq\n limit {{ limit }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.348617, + "supported_languages": null + }, + "macro.dbt.alter_column_comment": { + "name": "alter_column_comment", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/persist_docs.sql", + "original_file_path": "macros/adapters/persist_docs.sql", + "unique_id": "macro.dbt.alter_column_comment", + "macro_sql": "{% macro alter_column_comment(relation, column_dict) -%}\n {{ return(adapter.dispatch('alter_column_comment', 'dbt')(relation, column_dict)) }}\n{% endmacro %}", + 
"depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__alter_column_comment"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.34934, + "supported_languages": null + }, + "macro.dbt.default__alter_column_comment": { + "name": "default__alter_column_comment", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/persist_docs.sql", + "original_file_path": "macros/adapters/persist_docs.sql", + "unique_id": "macro.dbt.default__alter_column_comment", + "macro_sql": "{% macro default__alter_column_comment(relation, column_dict) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_column_comment macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3495219, + "supported_languages": null + }, + "macro.dbt.alter_relation_comment": { + "name": "alter_relation_comment", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/persist_docs.sql", + "original_file_path": "macros/adapters/persist_docs.sql", + "unique_id": "macro.dbt.alter_relation_comment", + "macro_sql": "{% macro alter_relation_comment(relation, relation_comment) -%}\n {{ return(adapter.dispatch('alter_relation_comment', 'dbt')(relation, relation_comment)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__alter_relation_comment"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3497322, + "supported_languages": null + }, + "macro.dbt.default__alter_relation_comment": { + "name": "default__alter_relation_comment", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/persist_docs.sql", + "original_file_path": "macros/adapters/persist_docs.sql", + "unique_id": "macro.dbt.default__alter_relation_comment", + "macro_sql": "{% macro default__alter_relation_comment(relation, relation_comment) -%}\n {{ exceptions.raise_not_implemented(\n 'alter_relation_comment macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.349911, + "supported_languages": null + }, + "macro.dbt.persist_docs": { + "name": "persist_docs", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/persist_docs.sql", + "original_file_path": "macros/adapters/persist_docs.sql", + "unique_id": "macro.dbt.persist_docs", + "macro_sql": "{% macro persist_docs(relation, model, for_relation=true, for_columns=true) -%}\n {{ return(adapter.dispatch('persist_docs', 'dbt')(relation, model, for_relation, for_columns)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__persist_docs"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.350187, + "supported_languages": null + }, + "macro.dbt.default__persist_docs": { + "name": "default__persist_docs", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/persist_docs.sql", + "original_file_path": "macros/adapters/persist_docs.sql", + "unique_id": 
"macro.dbt.default__persist_docs", + "macro_sql": "{% macro default__persist_docs(relation, model, for_relation, for_columns) -%}\n {% if for_relation and config.persist_relation_docs() and model.description %}\n {% do run_query(alter_relation_comment(relation, model.description)) %}\n {% endif %}\n\n {% if for_columns and config.persist_column_docs() and model.columns %}\n {% do run_query(alter_column_comment(relation, model.columns)) %}\n {% endif %}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.run_query", + "macro.dbt.alter_relation_comment", + "macro.dbt.alter_column_comment" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.350698, + "supported_languages": null + }, + "macro.dbt.get_catalog": { + "name": "get_catalog", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/metadata.sql", + "original_file_path": "macros/adapters/metadata.sql", + "unique_id": "macro.dbt.get_catalog", + "macro_sql": "{% macro get_catalog(information_schema, schemas) -%}\n {{ return(adapter.dispatch('get_catalog', 'dbt')(information_schema, schemas)) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__get_catalog"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.352266, + "supported_languages": null + }, + "macro.dbt.default__get_catalog": { + "name": "default__get_catalog", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/metadata.sql", + "original_file_path": "macros/adapters/metadata.sql", + "unique_id": "macro.dbt.default__get_catalog", + "macro_sql": "{% macro default__get_catalog(information_schema, schemas) -%}\n\n {% set typename = adapter.type() %}\n {% set msg -%}\n get_catalog not implemented for {{ typename }}\n {%- endset %}\n\n {{ exceptions.raise_compiler_error(msg) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.352538, + "supported_languages": null + }, + "macro.dbt.information_schema_name": { + "name": "information_schema_name", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/metadata.sql", + "original_file_path": "macros/adapters/metadata.sql", + "unique_id": "macro.dbt.information_schema_name", + "macro_sql": "{% macro information_schema_name(database) %}\n {{ return(adapter.dispatch('information_schema_name', 'dbt')(database)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.default__information_schema_name"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3527622, + "supported_languages": null + }, + "macro.dbt.default__information_schema_name": { + "name": "default__information_schema_name", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/metadata.sql", + "original_file_path": "macros/adapters/metadata.sql", + "unique_id": "macro.dbt.default__information_schema_name", + "macro_sql": "{% macro default__information_schema_name(database) -%}\n {%- if database -%}\n {{ database }}.INFORMATION_SCHEMA\n {%- else -%}\n INFORMATION_SCHEMA\n {%- endif -%}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + 
"meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.352939, + "supported_languages": null + }, + "macro.dbt.list_schemas": { + "name": "list_schemas", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/metadata.sql", + "original_file_path": "macros/adapters/metadata.sql", + "unique_id": "macro.dbt.list_schemas", + "macro_sql": "{% macro list_schemas(database) -%}\n {{ return(adapter.dispatch('list_schemas', 'dbt')(database)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__list_schemas"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.353135, + "supported_languages": null + }, + "macro.dbt.default__list_schemas": { + "name": "default__list_schemas", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/metadata.sql", + "original_file_path": "macros/adapters/metadata.sql", + "unique_id": "macro.dbt.default__list_schemas", + "macro_sql": "{% macro default__list_schemas(database) -%}\n {% set sql %}\n select distinct schema_name\n from {{ information_schema_name(database) }}.SCHEMATA\n where catalog_name ilike '{{ database }}'\n {% endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.information_schema_name", "macro.dbt.run_query"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3534908, + "supported_languages": null + }, + "macro.dbt.check_schema_exists": { + "name": "check_schema_exists", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/metadata.sql", + "original_file_path": "macros/adapters/metadata.sql", + "unique_id": "macro.dbt.check_schema_exists", + "macro_sql": "{% macro check_schema_exists(information_schema, schema) -%}\n {{ return(adapter.dispatch('check_schema_exists', 'dbt')(information_schema, schema)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__check_schema_exists"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.353735, + "supported_languages": null + }, + "macro.dbt.default__check_schema_exists": { + "name": "default__check_schema_exists", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/metadata.sql", + "original_file_path": "macros/adapters/metadata.sql", + "unique_id": "macro.dbt.default__check_schema_exists", + "macro_sql": "{% macro default__check_schema_exists(information_schema, schema) -%}\n {% set sql -%}\n select count(*)\n from {{ information_schema.replace(information_schema_view='SCHEMATA') }}\n where catalog_name='{{ information_schema.database }}'\n and schema_name='{{ schema }}'\n {%- endset %}\n {{ return(run_query(sql)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.replace", "macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3540711, + "supported_languages": null + }, + "macro.dbt.list_relations_without_caching": { + "name": "list_relations_without_caching", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/metadata.sql", + "original_file_path": 
"macros/adapters/metadata.sql", + "unique_id": "macro.dbt.list_relations_without_caching", + "macro_sql": "{% macro list_relations_without_caching(schema_relation) %}\n {{ return(adapter.dispatch('list_relations_without_caching', 'dbt')(schema_relation)) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.snowflake__list_relations_without_caching" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.354295, + "supported_languages": null + }, + "macro.dbt.default__list_relations_without_caching": { + "name": "default__list_relations_without_caching", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/metadata.sql", + "original_file_path": "macros/adapters/metadata.sql", + "unique_id": "macro.dbt.default__list_relations_without_caching", + "macro_sql": "{% macro default__list_relations_without_caching(schema_relation) %}\n {{ exceptions.raise_not_implemented(\n 'list_relations_without_caching macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3544781, + "supported_languages": null + }, + "macro.dbt.drop_relation": { + "name": "drop_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/drop_relation.sql", + "original_file_path": "macros/adapters/drop_relation.sql", + "unique_id": "macro.dbt.drop_relation", + "macro_sql": "{% macro drop_relation(relation) -%}\n {{ return(adapter.dispatch('drop_relation', 'dbt')(relation)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__drop_relation"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3554718, + "supported_languages": null + }, + "macro.dbt.default__drop_relation": { + "name": "default__drop_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/drop_relation.sql", + "original_file_path": "macros/adapters/drop_relation.sql", + "unique_id": "macro.dbt.default__drop_relation", + "macro_sql": "{% macro default__drop_relation(relation) -%}\n {% call statement('drop_relation', auto_begin=False) -%}\n {%- if relation.is_table -%}\n {{- drop_table(relation) -}}\n {%- elif relation.is_view -%}\n {{- drop_view(relation) -}}\n {%- elif relation.is_materialized_view -%}\n {{- drop_materialized_view(relation) -}}\n {%- else -%}\n drop {{ relation.type }} if exists {{ relation }} cascade\n {%- endif -%}\n {%- endcall %}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.statement", + "macro.dbt.drop_table", + "macro.dbt.drop_view", + "macro.dbt.drop_materialized_view" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.355965, + "supported_languages": null + }, + "macro.dbt.drop_table": { + "name": "drop_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/drop_relation.sql", + "original_file_path": "macros/adapters/drop_relation.sql", + "unique_id": "macro.dbt.drop_table", + "macro_sql": "{% macro drop_table(relation) -%}\n {{ return(adapter.dispatch('drop_table', 'dbt')(relation)) }}\n{%- endmacro %}", + "depends_on": { "macros": 
["macro.dbt.default__drop_table"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.356164, + "supported_languages": null + }, + "macro.dbt.default__drop_table": { + "name": "default__drop_table", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/drop_relation.sql", + "original_file_path": "macros/adapters/drop_relation.sql", + "unique_id": "macro.dbt.default__drop_table", + "macro_sql": "{% macro default__drop_table(relation) -%}\n drop table if exists {{ relation }} cascade\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.356276, + "supported_languages": null + }, + "macro.dbt.drop_view": { + "name": "drop_view", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/drop_relation.sql", + "original_file_path": "macros/adapters/drop_relation.sql", + "unique_id": "macro.dbt.drop_view", + "macro_sql": "{% macro drop_view(relation) -%}\n {{ return(adapter.dispatch('drop_view', 'dbt')(relation)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__drop_view"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3564742, + "supported_languages": null + }, + "macro.dbt.default__drop_view": { + "name": "default__drop_view", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/drop_relation.sql", + "original_file_path": "macros/adapters/drop_relation.sql", + "unique_id": "macro.dbt.default__drop_view", + "macro_sql": "{% macro default__drop_view(relation) -%}\n drop view if exists {{ relation }} cascade\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.35659, + "supported_languages": null + }, + "macro.dbt.drop_materialized_view": { + "name": "drop_materialized_view", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/drop_relation.sql", + "original_file_path": "macros/adapters/drop_relation.sql", + "unique_id": "macro.dbt.drop_materialized_view", + "macro_sql": "{% macro drop_materialized_view(relation) -%}\n {{ return(adapter.dispatch('drop_materialized_view', 'dbt')(relation)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__drop_materialized_view"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.356778, + "supported_languages": null + }, + "macro.dbt.default__drop_materialized_view": { + "name": "default__drop_materialized_view", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/drop_relation.sql", + "original_file_path": "macros/adapters/drop_relation.sql", + "unique_id": "macro.dbt.default__drop_materialized_view", + "macro_sql": "{% macro default__drop_materialized_view(relation) -%}\n drop materialized view if exists {{ relation }} cascade\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3568902, + "supported_languages": null + }, + 
"macro.dbt.get_columns_in_relation": { + "name": "get_columns_in_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.get_columns_in_relation", + "macro_sql": "{% macro get_columns_in_relation(relation) -%}\n {{ return(adapter.dispatch('get_columns_in_relation', 'dbt')(relation)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__get_columns_in_relation"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3592498, + "supported_languages": null + }, + "macro.dbt.default__get_columns_in_relation": { + "name": "default__get_columns_in_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.default__get_columns_in_relation", + "macro_sql": "{% macro default__get_columns_in_relation(relation) -%}\n {{ exceptions.raise_not_implemented(\n 'get_columns_in_relation macro not implemented for adapter '+adapter.type()) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3594182, + "supported_languages": null + }, + "macro.dbt.sql_convert_columns_in_relation": { + "name": "sql_convert_columns_in_relation", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.sql_convert_columns_in_relation", + "macro_sql": "{% macro sql_convert_columns_in_relation(table) -%}\n {% set columns = [] %}\n {% for row in table %}\n {% do columns.append(api.Column(*row)) %}\n {% endfor %}\n {{ return(columns) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.359744, + "supported_languages": null + }, + "macro.dbt.get_empty_subquery_sql": { + "name": "get_empty_subquery_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.get_empty_subquery_sql", + "macro_sql": "{% macro get_empty_subquery_sql(select_sql, select_sql_header=none) -%}\n {{ return(adapter.dispatch('get_empty_subquery_sql', 'dbt')(select_sql, select_sql_header)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_empty_subquery_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3599691, + "supported_languages": null + }, + "macro.dbt.default__get_empty_subquery_sql": { + "name": "default__get_empty_subquery_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.default__get_empty_subquery_sql", + "macro_sql": "{% macro default__get_empty_subquery_sql(select_sql, select_sql_header=none) %}\n {%- if select_sql_header is not none -%}\n {{ select_sql_header }}\n {%- endif -%}\n select * from (\n {{ select_sql }}\n ) as __dbt_sbq\n where false\n limit 0\n{% endmacro %}", + 
"depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.360182, + "supported_languages": null + }, + "macro.dbt.get_empty_schema_sql": { + "name": "get_empty_schema_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.get_empty_schema_sql", + "macro_sql": "{% macro get_empty_schema_sql(columns) -%}\n {{ return(adapter.dispatch('get_empty_schema_sql', 'dbt')(columns)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_empty_schema_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.360372, + "supported_languages": null + }, + "macro.dbt.default__get_empty_schema_sql": { + "name": "default__get_empty_schema_sql", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.default__get_empty_schema_sql", + "macro_sql": "{% macro default__get_empty_schema_sql(columns) %}\n {%- set col_err = [] -%}\n select\n {% for i in columns %}\n {%- set col = columns[i] -%}\n {%- if col['data_type'] is not defined -%}\n {{ col_err.append(col['name']) }}\n {%- endif -%}\n {% set col_name = adapter.quote(col['name']) if col.get('quote') else col['name'] %}\n cast(null as {{ col['data_type'] }}) as {{ col_name }}{{ \", \" if not loop.last }}\n {%- endfor -%}\n {%- if (col_err | length) > 0 -%}\n {{ exceptions.column_type_missing(column_names=col_err) }}\n {%- endif -%}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.361313, + "supported_languages": null + }, + "macro.dbt.get_column_schema_from_query": { + "name": "get_column_schema_from_query", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.get_column_schema_from_query", + "macro_sql": "{% macro get_column_schema_from_query(select_sql, select_sql_header=none) -%}\n {% set columns = [] %}\n {# -- Using an 'empty subquery' here to get the same schema as the given select_sql statement, without necessitating a data scan.#}\n {% set sql = get_empty_subquery_sql(select_sql, select_sql_header) %}\n {% set column_schema = adapter.get_column_schema_from_query(sql) %}\n {{ return(column_schema) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.get_empty_subquery_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.361774, + "supported_languages": null + }, + "macro.dbt.get_columns_in_query": { + "name": "get_columns_in_query", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.get_columns_in_query", + "macro_sql": "{% macro get_columns_in_query(select_sql) -%}\n {{ return(adapter.dispatch('get_columns_in_query', 'dbt')(select_sql)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__get_columns_in_query"] }, + "description": "", + "meta": {}, + 
"docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.36198, + "supported_languages": null + }, + "macro.dbt.default__get_columns_in_query": { + "name": "default__get_columns_in_query", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.default__get_columns_in_query", + "macro_sql": "{% macro default__get_columns_in_query(select_sql) %}\n {% call statement('get_columns_in_query', fetch_result=True, auto_begin=False) -%}\n {{ get_empty_subquery_sql(select_sql) }}\n {% endcall %}\n {{ return(load_result('get_columns_in_query').table.columns | map(attribute='name') | list) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.statement", "macro.dbt.get_empty_subquery_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.362349, + "supported_languages": null + }, + "macro.dbt.alter_column_type": { + "name": "alter_column_type", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.alter_column_type", + "macro_sql": "{% macro alter_column_type(relation, column_name, new_column_type) -%}\n {{ return(adapter.dispatch('alter_column_type', 'dbt')(relation, column_name, new_column_type)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_snowflake.snowflake__alter_column_type"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.362596, + "supported_languages": null + }, + "macro.dbt.default__alter_column_type": { + "name": "default__alter_column_type", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.default__alter_column_type", + "macro_sql": "{% macro default__alter_column_type(relation, column_name, new_column_type) -%}\n {#\n 1. Create a new column (w/ temp name and correct type)\n 2. Copy data over to it\n 3. Drop the existing column (cascade!)\n 4. 
Rename the new column to existing column\n #}\n {%- set tmp_column = column_name + \"__dbt_alter\" -%}\n\n {% call statement('alter_column_type') %}\n alter table {{ relation }} add column {{ adapter.quote(tmp_column) }} {{ new_column_type }};\n update {{ relation }} set {{ adapter.quote(tmp_column) }} = {{ adapter.quote(column_name) }};\n alter table {{ relation }} drop column {{ adapter.quote(column_name) }} cascade;\n alter table {{ relation }} rename column {{ adapter.quote(tmp_column) }} to {{ adapter.quote(column_name) }}\n {% endcall %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.363265, + "supported_languages": null + }, + "macro.dbt.alter_relation_add_remove_columns": { + "name": "alter_relation_add_remove_columns", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.alter_relation_add_remove_columns", + "macro_sql": "{% macro alter_relation_add_remove_columns(relation, add_columns = none, remove_columns = none) -%}\n {{ return(adapter.dispatch('alter_relation_add_remove_columns', 'dbt')(relation, add_columns, remove_columns)) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snowflake.snowflake__alter_relation_add_remove_columns" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.363557, + "supported_languages": null + }, + "macro.dbt.default__alter_relation_add_remove_columns": { + "name": "default__alter_relation_add_remove_columns", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/adapters/columns.sql", + "original_file_path": "macros/adapters/columns.sql", + "unique_id": "macro.dbt.default__alter_relation_add_remove_columns", + "macro_sql": "{% macro default__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}\n\n {% if add_columns is none %}\n {% set add_columns = [] %}\n {% endif %}\n {% if remove_columns is none %}\n {% set remove_columns = [] %}\n {% endif %}\n\n {% set sql -%}\n\n alter {{ relation.type }} {{ relation }}\n\n {% for column in add_columns %}\n add column {{ column.name }} {{ column.data_type }}{{ ',' if not loop.last }}\n {% endfor %}{{ ',' if add_columns and remove_columns }}\n\n {% for column in remove_columns %}\n drop column {{ column.name }}{{ ',' if not loop.last }}\n {% endfor %}\n\n {%- endset -%}\n\n {% do run_query(sql) %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3644001, + "supported_languages": null + }, + "macro.dbt.resolve_model_name": { + "name": "resolve_model_name", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/python_model/python.sql", + "original_file_path": "macros/python_model/python.sql", + "unique_id": "macro.dbt.resolve_model_name", + "macro_sql": "{% macro resolve_model_name(input_model_name) %}\n {{ return(adapter.dispatch('resolve_model_name', 'dbt')(input_model_name)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.default__resolve_model_name"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + 
"patch_path": null, + "arguments": [], + "created_at": 1705588676.366153, + "supported_languages": null + }, + "macro.dbt.default__resolve_model_name": { + "name": "default__resolve_model_name", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/python_model/python.sql", + "original_file_path": "macros/python_model/python.sql", + "unique_id": "macro.dbt.default__resolve_model_name", + "macro_sql": "\n\n{%- macro default__resolve_model_name(input_model_name) -%}\n {{ input_model_name | string | replace('\"', '\\\"') }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3663201, + "supported_languages": null + }, + "macro.dbt.build_ref_function": { + "name": "build_ref_function", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/python_model/python.sql", + "original_file_path": "macros/python_model/python.sql", + "unique_id": "macro.dbt.build_ref_function", + "macro_sql": "{% macro build_ref_function(model) %}\n\n {%- set ref_dict = {} -%}\n {%- for _ref in model.refs -%}\n {% set _ref_args = [_ref.get('package'), _ref['name']] if _ref.get('package') else [_ref['name'],] %}\n {%- set resolved = ref(*_ref_args, v=_ref.get('version')) -%}\n {%- if _ref.get('version') -%}\n {% do _ref_args.extend([\"v\" ~ _ref['version']]) %}\n {%- endif -%}\n {%- do ref_dict.update({_ref_args | join('.'): resolve_model_name(resolved)}) -%}\n {%- endfor -%}\n\ndef ref(*args, **kwargs):\n refs = {{ ref_dict | tojson }}\n key = '.'.join(args)\n version = kwargs.get(\"v\") or kwargs.get(\"version\")\n if version:\n key += f\".v{version}\"\n dbt_load_df_function = kwargs.get(\"dbt_load_df_function\")\n return dbt_load_df_function(refs[key])\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.resolve_model_name"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.367177, + "supported_languages": null + }, + "macro.dbt.build_source_function": { + "name": "build_source_function", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/python_model/python.sql", + "original_file_path": "macros/python_model/python.sql", + "unique_id": "macro.dbt.build_source_function", + "macro_sql": "{% macro build_source_function(model) %}\n\n {%- set source_dict = {} -%}\n {%- for _source in model.sources -%}\n {%- set resolved = source(*_source) -%}\n {%- do source_dict.update({_source | join('.'): resolve_model_name(resolved)}) -%}\n {%- endfor -%}\n\ndef source(*args, dbt_load_df_function):\n sources = {{ source_dict | tojson }}\n key = '.'.join(args)\n return dbt_load_df_function(sources[key])\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.resolve_model_name"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.367604, + "supported_languages": null + }, + "macro.dbt.build_config_dict": { + "name": "build_config_dict", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/python_model/python.sql", + "original_file_path": "macros/python_model/python.sql", + "unique_id": "macro.dbt.build_config_dict", + "macro_sql": "{% macro build_config_dict(model) %}\n {%- set config_dict = {} -%}\n {% set config_dbt_used = zip(model.config.config_keys_used, model.config.config_keys_defaults) | list 
%}\n {%- for key, default in config_dbt_used -%}\n {# weird type testing with enum, would be much easier to write this logic in Python! #}\n {%- if key == \"language\" -%}\n {%- set value = \"python\" -%}\n {%- endif -%}\n {%- set value = model.config.get(key, default) -%}\n {%- do config_dict.update({key: value}) -%}\n {%- endfor -%}\nconfig_dict = {{ config_dict }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.368205, + "supported_languages": null + }, + "macro.dbt.py_script_postfix": { + "name": "py_script_postfix", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/python_model/python.sql", + "original_file_path": "macros/python_model/python.sql", + "unique_id": "macro.dbt.py_script_postfix", + "macro_sql": "{% macro py_script_postfix(model) %}\n# This part is user provided model code\n# you will need to copy the next section to run the code\n# COMMAND ----------\n# this part is dbt logic for get ref work, do not modify\n\n{{ build_ref_function(model ) }}\n{{ build_source_function(model ) }}\n{{ build_config_dict(model) }}\n\nclass config:\n def __init__(self, *args, **kwargs):\n pass\n\n @staticmethod\n def get(key, default=None):\n return config_dict.get(key, default)\n\nclass this:\n \"\"\"dbt.this() or dbt.this.identifier\"\"\"\n database = \"{{ this.database }}\"\n schema = \"{{ this.schema }}\"\n identifier = \"{{ this.identifier }}\"\n {% set this_relation_name = resolve_model_name(this) %}\n def __repr__(self):\n return '{{ this_relation_name }}'\n\n\nclass dbtObj:\n def __init__(self, load_df_function) -> None:\n self.source = lambda *args: source(*args, dbt_load_df_function=load_df_function)\n self.ref = lambda *args, **kwargs: ref(*args, **kwargs, dbt_load_df_function=load_df_function)\n self.config = config\n self.this = this()\n self.is_incremental = {{ is_incremental() }}\n\n# COMMAND ----------\n{{py_script_comment()}}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.build_ref_function", + "macro.dbt.build_source_function", + "macro.dbt.build_config_dict", + "macro.dbt.resolve_model_name", + "macro.dbt.is_incremental", + "macro.dbt.py_script_comment" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.368702, + "supported_languages": null + }, + "macro.dbt.py_script_comment": { + "name": "py_script_comment", + "resource_type": "macro", + "package_name": "dbt", + "path": "macros/python_model/python.sql", + "original_file_path": "macros/python_model/python.sql", + "unique_id": "macro.dbt.py_script_comment", + "macro_sql": "{%macro py_script_comment()%}\n{%endmacro%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.368782, + "supported_languages": null + }, + "macro.dbt.test_unique": { + "name": "test_unique", + "resource_type": "macro", + "package_name": "dbt", + "path": "tests/generic/builtin.sql", + "original_file_path": "tests/generic/builtin.sql", + "unique_id": "macro.dbt.test_unique", + "macro_sql": "{% test unique(model, column_name) %}\n {% set macro = adapter.dispatch('test_unique', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt.default__test_unique"] }, + "description": "", + 
"meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.369358, + "supported_languages": null + }, + "macro.dbt.test_not_null": { + "name": "test_not_null", + "resource_type": "macro", + "package_name": "dbt", + "path": "tests/generic/builtin.sql", + "original_file_path": "tests/generic/builtin.sql", + "unique_id": "macro.dbt.test_not_null", + "macro_sql": "{% test not_null(model, column_name) %}\n {% set macro = adapter.dispatch('test_not_null', 'dbt') %}\n {{ macro(model, column_name) }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt.default__test_not_null"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.36962, + "supported_languages": null + }, + "macro.dbt.test_accepted_values": { + "name": "test_accepted_values", + "resource_type": "macro", + "package_name": "dbt", + "path": "tests/generic/builtin.sql", + "original_file_path": "tests/generic/builtin.sql", + "unique_id": "macro.dbt.test_accepted_values", + "macro_sql": "{% test accepted_values(model, column_name, values, quote=True) %}\n {% set macro = adapter.dispatch('test_accepted_values', 'dbt') %}\n {{ macro(model, column_name, values, quote) }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt.default__test_accepted_values"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.369932, + "supported_languages": null + }, + "macro.dbt.test_relationships": { + "name": "test_relationships", + "resource_type": "macro", + "package_name": "dbt", + "path": "tests/generic/builtin.sql", + "original_file_path": "tests/generic/builtin.sql", + "unique_id": "macro.dbt.test_relationships", + "macro_sql": "{% test relationships(model, column_name, to, field) %}\n {% set macro = adapter.dispatch('test_relationships', 'dbt') %}\n {{ macro(model, column_name, to, field) }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt.default__test_relationships"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3702888, + "supported_languages": null + }, + "macro.dbt_utils.get_url_host": { + "name": "get_url_host", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/web/get_url_host.sql", + "original_file_path": "macros/web/get_url_host.sql", + "unique_id": "macro.dbt_utils.get_url_host", + "macro_sql": "{% macro get_url_host(field) -%}\n {{ return(adapter.dispatch('get_url_host', 'dbt_utils')(field)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__get_url_host"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.370712, + "supported_languages": null + }, + "macro.dbt_utils.default__get_url_host": { + "name": "default__get_url_host", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/web/get_url_host.sql", + "original_file_path": "macros/web/get_url_host.sql", + "unique_id": "macro.dbt_utils.default__get_url_host", + "macro_sql": "{% macro default__get_url_host(field) -%}\n\n{%- set parsed =\n dbt.split_part(\n dbt.split_part(\n dbt.replace(\n dbt.replace(\n dbt.replace(field, \"'android-app://'\", \"''\"\n ), \"'http://'\", \"''\"\n ), \"'https://'\", \"''\"\n ), \"'/'\", 1\n ), 
\"'?'\", 1\n )\n\n-%}\n\n\n {{ dbt.safe_cast(\n parsed,\n dbt.type_string()\n )}}\n\n{%- endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.split_part", + "macro.dbt.replace", + "macro.dbt.safe_cast", + "macro.dbt.type_string" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.371244, + "supported_languages": null + }, + "macro.dbt_utils.get_url_path": { + "name": "get_url_path", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/web/get_url_path.sql", + "original_file_path": "macros/web/get_url_path.sql", + "unique_id": "macro.dbt_utils.get_url_path", + "macro_sql": "{% macro get_url_path(field) -%}\n {{ return(adapter.dispatch('get_url_path', 'dbt_utils')(field)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__get_url_path"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3717592, + "supported_languages": null + }, + "macro.dbt_utils.default__get_url_path": { + "name": "default__get_url_path", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/web/get_url_path.sql", + "original_file_path": "macros/web/get_url_path.sql", + "unique_id": "macro.dbt_utils.default__get_url_path", + "macro_sql": "{% macro default__get_url_path(field) -%}\n\n {%- set stripped_url =\n dbt.replace(\n dbt.replace(field, \"'http://'\", \"''\"), \"'https://'\", \"''\")\n -%}\n\n {%- set first_slash_pos -%}\n coalesce(\n nullif({{ dbt.position(\"'/'\", stripped_url) }}, 0),\n {{ dbt.position(\"'?'\", stripped_url) }} - 1\n )\n {%- endset -%}\n\n {%- set parsed_path =\n dbt.split_part(\n dbt.right(\n stripped_url,\n dbt.length(stripped_url) ~ \"-\" ~ first_slash_pos\n ),\n \"'?'\", 1\n )\n -%}\n\n {{ dbt.safe_cast(\n parsed_path,\n dbt.type_string()\n )}}\n\n{%- endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.replace", + "macro.dbt.position", + "macro.dbt.split_part", + "macro.dbt.right", + "macro.dbt.length", + "macro.dbt.safe_cast", + "macro.dbt.type_string" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.372545, + "supported_languages": null + }, + "macro.dbt_utils.get_url_parameter": { + "name": "get_url_parameter", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/web/get_url_parameter.sql", + "original_file_path": "macros/web/get_url_parameter.sql", + "unique_id": "macro.dbt_utils.get_url_parameter", + "macro_sql": "{% macro get_url_parameter(field, url_parameter) -%}\n {{ return(adapter.dispatch('get_url_parameter', 'dbt_utils')(field, url_parameter)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__get_url_parameter"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.372885, + "supported_languages": null + }, + "macro.dbt_utils.default__get_url_parameter": { + "name": "default__get_url_parameter", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/web/get_url_parameter.sql", + "original_file_path": "macros/web/get_url_parameter.sql", + "unique_id": "macro.dbt_utils.default__get_url_parameter", + "macro_sql": "{% macro default__get_url_parameter(field, url_parameter) -%}\n\n{%- set formatted_url_parameter = 
\"'\" + url_parameter + \"='\" -%}\n\n{%- set split = dbt.split_part(dbt.split_part(field, formatted_url_parameter, 2), \"'&'\", 1) -%}\n\nnullif({{ split }},'')\n\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.split_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.373237, + "supported_languages": null + }, + "macro.dbt_utils.test_fewer_rows_than": { + "name": "test_fewer_rows_than", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/fewer_rows_than.sql", + "original_file_path": "macros/generic_tests/fewer_rows_than.sql", + "unique_id": "macro.dbt_utils.test_fewer_rows_than", + "macro_sql": "{% test fewer_rows_than(model, compare_model, group_by_columns = []) %}\n {{ return(adapter.dispatch('test_fewer_rows_than', 'dbt_utils')(model, compare_model, group_by_columns)) }}\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_fewer_rows_than"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.374389, + "supported_languages": null + }, + "macro.dbt_utils.default__test_fewer_rows_than": { + "name": "default__test_fewer_rows_than", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/fewer_rows_than.sql", + "original_file_path": "macros/generic_tests/fewer_rows_than.sql", + "unique_id": "macro.dbt_utils.default__test_fewer_rows_than", + "macro_sql": "{% macro default__test_fewer_rows_than(model, compare_model, group_by_columns) %}\n\n{{ config(fail_calc = 'sum(coalesce(row_count_delta, 0))') }}\n\n{% if group_by_columns|length() > 0 %}\n {% set select_gb_cols = group_by_columns|join(' ,') + ', ' %}\n {% set join_gb_cols %}\n {% for c in group_by_columns %}\n and a.{{c}} = b.{{c}}\n {% endfor %}\n {% endset %}\n {% set groupby_gb_cols = 'group by ' + group_by_columns|join(',') %}\n{% endif %}\n\n{#-- We must add a fake join key in case additional grouping variables are not provided --#}\n{#-- Redshift does not allow for dynamically created join conditions (e.g. full join on 1 = 1 --#}\n{#-- The same logic is used in equal_rowcount. In case of changes, maintain consistent logic --#}\n{% set group_by_columns = ['id_dbtutils_test_fewer_rows_than'] + group_by_columns %}\n{% set groupby_gb_cols = 'group by ' + group_by_columns|join(',') %}\n\n\nwith a as (\n\n select \n {{select_gb_cols}}\n 1 as id_dbtutils_test_fewer_rows_than,\n count(*) as count_our_model \n from {{ model }}\n {{ groupby_gb_cols }}\n\n),\nb as (\n\n select \n {{select_gb_cols}}\n 1 as id_dbtutils_test_fewer_rows_than,\n count(*) as count_comparison_model \n from {{ compare_model }}\n {{ groupby_gb_cols }}\n\n),\ncounts as (\n\n select\n\n {% for c in group_by_columns -%}\n a.{{c}} as {{c}}_a,\n b.{{c}} as {{c}}_b,\n {% endfor %}\n\n count_our_model,\n count_comparison_model\n from a\n full join b on \n a.id_dbtutils_test_fewer_rows_than = b.id_dbtutils_test_fewer_rows_than\n {{ join_gb_cols }}\n\n),\nfinal as (\n\n select *,\n case\n -- fail the test if we have more rows than the reference model and return the row count delta\n when count_our_model > count_comparison_model then (count_our_model - count_comparison_model)\n -- fail the test if they are the same number\n when count_our_model = count_comparison_model then 1\n -- pass the test if the delta is positive (i.e. 
return the number 0)\n else 0\n end as row_count_delta\n from counts\n\n)\n\nselect * from final\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.375475, + "supported_languages": null + }, + "macro.dbt_utils.test_equal_rowcount": { + "name": "test_equal_rowcount", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/equal_rowcount.sql", + "original_file_path": "macros/generic_tests/equal_rowcount.sql", + "unique_id": "macro.dbt_utils.test_equal_rowcount", + "macro_sql": "{% test equal_rowcount(model, compare_model, group_by_columns = []) %}\n {{ return(adapter.dispatch('test_equal_rowcount', 'dbt_utils')(model, compare_model, group_by_columns)) }}\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_equal_rowcount"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.376384, + "supported_languages": null + }, + "macro.dbt_utils.default__test_equal_rowcount": { + "name": "default__test_equal_rowcount", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/equal_rowcount.sql", + "original_file_path": "macros/generic_tests/equal_rowcount.sql", + "unique_id": "macro.dbt_utils.default__test_equal_rowcount", + "macro_sql": "{% macro default__test_equal_rowcount(model, compare_model, group_by_columns) %}\n\n{#-- Needs to be set at parse time, before we return '' below --#}\n{{ config(fail_calc = 'sum(coalesce(diff_count, 0))') }}\n\n{#-- Prevent querying of db in parsing mode. This works because this macro does not create any new refs. #}\n{%- if not execute -%}\n {{ return('') }}\n{% endif %}\n\n{% if group_by_columns|length() > 0 %}\n {% set select_gb_cols = group_by_columns|join(', ') + ', ' %}\n {% set join_gb_cols %}\n {% for c in group_by_columns %}\n and a.{{c}} = b.{{c}}\n {% endfor %}\n {% endset %}\n {% set groupby_gb_cols = 'group by ' + group_by_columns|join(',') %}\n{% endif %}\n\n{#-- We must add a fake join key in case additional grouping variables are not provided --#}\n{#-- Redshift does not allow for dynamically created join conditions (e.g. full join on 1 = 1 --#}\n{#-- The same logic is used in fewer_rows_than. 
In case of changes, maintain consistent logic --#}\n{% set group_by_columns = ['id_dbtutils_test_equal_rowcount'] + group_by_columns %}\n{% set groupby_gb_cols = 'group by ' + group_by_columns|join(',') %}\n\nwith a as (\n\n select \n {{select_gb_cols}}\n 1 as id_dbtutils_test_equal_rowcount,\n count(*) as count_a \n from {{ model }}\n {{groupby_gb_cols}}\n\n\n),\nb as (\n\n select \n {{select_gb_cols}}\n 1 as id_dbtutils_test_equal_rowcount,\n count(*) as count_b \n from {{ compare_model }}\n {{groupby_gb_cols}}\n\n),\nfinal as (\n\n select\n \n {% for c in group_by_columns -%}\n a.{{c}} as {{c}}_a,\n b.{{c}} as {{c}}_b,\n {% endfor %}\n\n count_a,\n count_b,\n abs(count_a - count_b) as diff_count\n\n from a\n full join b\n on\n a.id_dbtutils_test_equal_rowcount = b.id_dbtutils_test_equal_rowcount\n {{join_gb_cols}}\n\n\n)\n\nselect * from final\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.377556, + "supported_languages": null + }, + "macro.dbt_utils.test_relationships_where": { + "name": "test_relationships_where", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/relationships_where.sql", + "original_file_path": "macros/generic_tests/relationships_where.sql", + "unique_id": "macro.dbt_utils.test_relationships_where", + "macro_sql": "{% test relationships_where(model, column_name, to, field, from_condition=\"1=1\", to_condition=\"1=1\") %}\n {{ return(adapter.dispatch('test_relationships_where', 'dbt_utils')(model, column_name, to, field, from_condition, to_condition)) }}\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_relationships_where"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.378443, + "supported_languages": null + }, + "macro.dbt_utils.default__test_relationships_where": { + "name": "default__test_relationships_where", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/relationships_where.sql", + "original_file_path": "macros/generic_tests/relationships_where.sql", + "unique_id": "macro.dbt_utils.default__test_relationships_where", + "macro_sql": "{% macro default__test_relationships_where(model, column_name, to, field, from_condition=\"1=1\", to_condition=\"1=1\") %}\n\n{# T-SQL has no boolean data type so we use 1=1 which returns TRUE #}\n{# ref https://stackoverflow.com/a/7170753/3842610 #}\n\nwith left_table as (\n\n select\n {{column_name}} as id\n\n from {{model}}\n\n where {{column_name}} is not null\n and {{from_condition}}\n\n),\n\nright_table as (\n\n select\n {{field}} as id\n\n from {{to}}\n\n where {{field}} is not null\n and {{to_condition}}\n\n),\n\nexceptions as (\n\n select\n left_table.id,\n right_table.id as right_id\n\n from left_table\n\n left join right_table\n on left_table.id = right_table.id\n\n where right_table.id is null\n\n)\n\nselect * from exceptions\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.378859, + "supported_languages": null + }, + "macro.dbt_utils.test_recency": { + "name": "test_recency", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/recency.sql", + 
"original_file_path": "macros/generic_tests/recency.sql", + "unique_id": "macro.dbt_utils.test_recency", + "macro_sql": "{% test recency(model, field, datepart, interval, ignore_time_component=False, group_by_columns = []) %}\n {{ return(adapter.dispatch('test_recency', 'dbt_utils')(model, field, datepart, interval, ignore_time_component, group_by_columns)) }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_utils.default__test_recency"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.379638, + "supported_languages": null + }, + "macro.dbt_utils.default__test_recency": { + "name": "default__test_recency", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/recency.sql", + "original_file_path": "macros/generic_tests/recency.sql", + "unique_id": "macro.dbt_utils.default__test_recency", + "macro_sql": "{% macro default__test_recency(model, field, datepart, interval, ignore_time_component, group_by_columns) %}\n\n{% set threshold = 'cast(' ~ dbt.dateadd(datepart, interval * -1, dbt.current_timestamp()) ~ ' as ' ~ ('date' if ignore_time_component else dbt.type_timestamp()) ~ ')' %}\n\n{% if group_by_columns|length() > 0 %}\n {% set select_gb_cols = group_by_columns|join(' ,') + ', ' %}\n {% set groupby_gb_cols = 'group by ' + group_by_columns|join(',') %}\n{% endif %}\n\n\nwith recency as (\n\n select \n\n {{ select_gb_cols }}\n {% if ignore_time_component %}\n cast(max({{ field }}) as date) as most_recent\n {%- else %}\n max({{ field }}) as most_recent\n {%- endif %}\n\n from {{ model }}\n\n {{ groupby_gb_cols }}\n\n)\n\nselect\n\n {{ select_gb_cols }}\n most_recent,\n {{ threshold }} as threshold\n\nfrom recency\nwhere most_recent < {{ threshold }}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.dateadd", + "macro.dbt.current_timestamp", + "macro.dbt.type_timestamp" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.380543, + "supported_languages": null + }, + "macro.dbt_utils.test_not_constant": { + "name": "test_not_constant", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/not_constant.sql", + "original_file_path": "macros/generic_tests/not_constant.sql", + "unique_id": "macro.dbt_utils.test_not_constant", + "macro_sql": "{% test not_constant(model, column_name, group_by_columns = []) %}\n {{ return(adapter.dispatch('test_not_constant', 'dbt_utils')(model, column_name, group_by_columns)) }}\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_not_constant"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.381003, + "supported_languages": null + }, + "macro.dbt_utils.default__test_not_constant": { + "name": "default__test_not_constant", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/not_constant.sql", + "original_file_path": "macros/generic_tests/not_constant.sql", + "unique_id": "macro.dbt_utils.default__test_not_constant", + "macro_sql": "{% macro default__test_not_constant(model, column_name, group_by_columns) %}\n\n{% if group_by_columns|length() > 0 %}\n {% set select_gb_cols = group_by_columns|join(' ,') + ', ' %}\n {% set groupby_gb_cols = 'group by ' + group_by_columns|join(',') %}\n{% endif 
%}\n\n\nselect\n {# In TSQL, subquery aggregate columns need aliases #}\n {# thus: a filler col name, 'filler_column' #}\n {{select_gb_cols}}\n count(distinct {{ column_name }}) as filler_column\n\nfrom {{ model }}\n\n {{groupby_gb_cols}}\n\nhaving count(distinct {{ column_name }}) = 1\n\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.38147, + "supported_languages": null + }, + "macro.dbt_utils.test_accepted_range": { + "name": "test_accepted_range", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/accepted_range.sql", + "original_file_path": "macros/generic_tests/accepted_range.sql", + "unique_id": "macro.dbt_utils.test_accepted_range", + "macro_sql": "{% test accepted_range(model, column_name, min_value=none, max_value=none, inclusive=true) %}\n {{ return(adapter.dispatch('test_accepted_range', 'dbt_utils')(model, column_name, min_value, max_value, inclusive)) }}\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_accepted_range"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.382098, + "supported_languages": null + }, + "macro.dbt_utils.default__test_accepted_range": { + "name": "default__test_accepted_range", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/accepted_range.sql", + "original_file_path": "macros/generic_tests/accepted_range.sql", + "unique_id": "macro.dbt_utils.default__test_accepted_range", + "macro_sql": "{% macro default__test_accepted_range(model, column_name, min_value=none, max_value=none, inclusive=true) %}\n\nwith meet_condition as(\n select *\n from {{ model }}\n),\n\nvalidation_errors as (\n select *\n from meet_condition\n where\n -- never true, defaults to an empty result set. Exists to ensure any combo of the `or` clauses below succeeds\n 1 = 2\n\n {%- if min_value is not none %}\n -- records with a value >= min_value are permitted. The `not` flips this to find records that don't meet the rule.\n or not {{ column_name }} > {{- \"=\" if inclusive }} {{ min_value }}\n {%- endif %}\n\n {%- if max_value is not none %}\n -- records with a value <= max_value are permitted. 
The `not` flips this to find records that don't meet the rule.\n or not {{ column_name }} < {{- \"=\" if inclusive }} {{ max_value }}\n {%- endif %}\n)\n\nselect *\nfrom validation_errors\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3826098, + "supported_languages": null + }, + "macro.dbt_utils.test_not_accepted_values": { + "name": "test_not_accepted_values", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/not_accepted_values.sql", + "original_file_path": "macros/generic_tests/not_accepted_values.sql", + "unique_id": "macro.dbt_utils.test_not_accepted_values", + "macro_sql": "{% test not_accepted_values(model, column_name, values, quote=True) %}\n {{ return(adapter.dispatch('test_not_accepted_values', 'dbt_utils')(model, column_name, values, quote)) }}\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_not_accepted_values"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.383193, + "supported_languages": null + }, + "macro.dbt_utils.default__test_not_accepted_values": { + "name": "default__test_not_accepted_values", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/not_accepted_values.sql", + "original_file_path": "macros/generic_tests/not_accepted_values.sql", + "unique_id": "macro.dbt_utils.default__test_not_accepted_values", + "macro_sql": "{% macro default__test_not_accepted_values(model, column_name, values, quote=True) %}\nwith all_values as (\n\n select distinct\n {{ column_name }} as value_field\n\n from {{ model }}\n\n),\n\nvalidation_errors as (\n\n select\n value_field\n\n from all_values\n where value_field in (\n {% for value in values -%}\n {% if quote -%}\n '{{ value }}'\n {%- else -%}\n {{ value }}\n {%- endif -%}\n {%- if not loop.last -%},{%- endif %}\n {%- endfor %}\n )\n\n)\n\nselect *\nfrom validation_errors\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3835871, + "supported_languages": null + }, + "macro.dbt_utils.test_at_least_one": { + "name": "test_at_least_one", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/at_least_one.sql", + "original_file_path": "macros/generic_tests/at_least_one.sql", + "unique_id": "macro.dbt_utils.test_at_least_one", + "macro_sql": "{% test at_least_one(model, column_name, group_by_columns = []) %}\n {{ return(adapter.dispatch('test_at_least_one', 'dbt_utils')(model, column_name, group_by_columns)) }}\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_at_least_one"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.384053, + "supported_languages": null + }, + "macro.dbt_utils.default__test_at_least_one": { + "name": "default__test_at_least_one", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/at_least_one.sql", + "original_file_path": "macros/generic_tests/at_least_one.sql", + "unique_id": "macro.dbt_utils.default__test_at_least_one", + "macro_sql": "{% macro default__test_at_least_one(model, 
column_name, group_by_columns) %}\n\n{% if group_by_columns|length() > 0 %}\n {% set select_gb_cols = group_by_columns|join(' ,') + ', ' %}\n {% set groupby_gb_cols = 'group by ' + group_by_columns|join(',') %}\n{% endif %}\n\nselect *\nfrom (\n select\n {# In TSQL, subquery aggregate columns need aliases #}\n {# thus: a filler col name, 'filler_column' #}\n {{select_gb_cols}}\n count({{ column_name }}) as filler_column\n\n from {{ model }}\n\n {{groupby_gb_cols}}\n\n having count({{ column_name }}) = 0\n\n) validation_errors\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3845341, + "supported_languages": null + }, + "macro.dbt_utils.test_unique_combination_of_columns": { + "name": "test_unique_combination_of_columns", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/unique_combination_of_columns.sql", + "original_file_path": "macros/generic_tests/unique_combination_of_columns.sql", + "unique_id": "macro.dbt_utils.test_unique_combination_of_columns", + "macro_sql": "{% test unique_combination_of_columns(model, combination_of_columns, quote_columns=false) %}\n {{ return(adapter.dispatch('test_unique_combination_of_columns', 'dbt_utils')(model, combination_of_columns, quote_columns)) }}\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_utils.default__test_unique_combination_of_columns" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.385195, + "supported_languages": null + }, + "macro.dbt_utils.default__test_unique_combination_of_columns": { + "name": "default__test_unique_combination_of_columns", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/unique_combination_of_columns.sql", + "original_file_path": "macros/generic_tests/unique_combination_of_columns.sql", + "unique_id": "macro.dbt_utils.default__test_unique_combination_of_columns", + "macro_sql": "{% macro default__test_unique_combination_of_columns(model, combination_of_columns, quote_columns=false) %}\n\n{% if not quote_columns %}\n {%- set column_list=combination_of_columns %}\n{% elif quote_columns %}\n {%- set column_list=[] %}\n {% for column in combination_of_columns -%}\n {% set column_list = column_list.append( adapter.quote(column) ) %}\n {%- endfor %}\n{% else %}\n {{ exceptions.raise_compiler_error(\n \"`quote_columns` argument for unique_combination_of_columns test must be one of [True, False] Got: '\" ~ quote ~\"'.'\"\n ) }}\n{% endif %}\n\n{%- set columns_csv=column_list | join(', ') %}\n\n\nwith validation_errors as (\n\n select\n {{ columns_csv }}\n from {{ model }}\n group by {{ columns_csv }}\n having count(*) > 1\n\n)\n\nselect *\nfrom validation_errors\n\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.385939, + "supported_languages": null + }, + "macro.dbt_utils.test_cardinality_equality": { + "name": "test_cardinality_equality", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/cardinality_equality.sql", + "original_file_path": "macros/generic_tests/cardinality_equality.sql", + "unique_id": "macro.dbt_utils.test_cardinality_equality", + "macro_sql": "{% test 
cardinality_equality(model, column_name, to, field) %}\n {{ return(adapter.dispatch('test_cardinality_equality', 'dbt_utils')(model, column_name, to, field)) }}\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_cardinality_equality"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.386507, + "supported_languages": null + }, + "macro.dbt_utils.default__test_cardinality_equality": { + "name": "default__test_cardinality_equality", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/cardinality_equality.sql", + "original_file_path": "macros/generic_tests/cardinality_equality.sql", + "unique_id": "macro.dbt_utils.default__test_cardinality_equality", + "macro_sql": "{% macro default__test_cardinality_equality(model, column_name, to, field) %}\n\n{# T-SQL does not let you use numbers as aliases for columns #}\n{# Thus, no \"GROUP BY 1\" #}\n\nwith table_a as (\nselect\n {{ column_name }},\n count(*) as num_rows\nfrom {{ model }}\ngroup by {{ column_name }}\n),\n\ntable_b as (\nselect\n {{ field }},\n count(*) as num_rows\nfrom {{ to }}\ngroup by {{ field }}\n),\n\nexcept_a as (\n select *\n from table_a\n {{ dbt.except() }}\n select *\n from table_b\n),\n\nexcept_b as (\n select *\n from table_b\n {{ dbt.except() }}\n select *\n from table_a\n),\n\nunioned as (\n select *\n from except_a\n union all\n select *\n from except_b\n)\n\nselect *\nfrom unioned\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.except"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.386859, + "supported_languages": null + }, + "macro.dbt_utils.test_expression_is_true": { + "name": "test_expression_is_true", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/expression_is_true.sql", + "original_file_path": "macros/generic_tests/expression_is_true.sql", + "unique_id": "macro.dbt_utils.test_expression_is_true", + "macro_sql": "{% test expression_is_true(model, expression, column_name=None) %}\n {{ return(adapter.dispatch('test_expression_is_true', 'dbt_utils')(model, expression, column_name)) }}\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_expression_is_true"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3872619, + "supported_languages": null + }, + "macro.dbt_utils.default__test_expression_is_true": { + "name": "default__test_expression_is_true", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/expression_is_true.sql", + "original_file_path": "macros/generic_tests/expression_is_true.sql", + "unique_id": "macro.dbt_utils.default__test_expression_is_true", + "macro_sql": "{% macro default__test_expression_is_true(model, expression, column_name) %}\n\n{% set column_list = '*' if should_store_failures() else \"1\" %}\n\nselect\n {{ column_list }}\nfrom {{ model }}\n{% if column_name is none %}\nwhere not({{ expression }})\n{%- else %}\nwhere not({{ column_name }} {{ expression }})\n{%- endif %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.should_store_failures"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 
1705588676.38763, + "supported_languages": null + }, + "macro.dbt_utils.test_not_null_proportion": { + "name": "test_not_null_proportion", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/not_null_proportion.sql", + "original_file_path": "macros/generic_tests/not_null_proportion.sql", + "unique_id": "macro.dbt_utils.test_not_null_proportion", + "macro_sql": "{% macro test_not_null_proportion(model, group_by_columns = []) %}\n {{ return(adapter.dispatch('test_not_null_proportion', 'dbt_utils')(model, group_by_columns, **kwargs)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_not_null_proportion"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.388314, + "supported_languages": null + }, + "macro.dbt_utils.default__test_not_null_proportion": { + "name": "default__test_not_null_proportion", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/not_null_proportion.sql", + "original_file_path": "macros/generic_tests/not_null_proportion.sql", + "unique_id": "macro.dbt_utils.default__test_not_null_proportion", + "macro_sql": "{% macro default__test_not_null_proportion(model, group_by_columns) %}\n\n{% set column_name = kwargs.get('column_name', kwargs.get('arg')) %}\n{% set at_least = kwargs.get('at_least', kwargs.get('arg')) %}\n{% set at_most = kwargs.get('at_most', kwargs.get('arg', 1)) %}\n\n{% if group_by_columns|length() > 0 %}\n {% set select_gb_cols = group_by_columns|join(' ,') + ', ' %}\n {% set groupby_gb_cols = 'group by ' + group_by_columns|join(',') %}\n{% endif %}\n\nwith validation as (\n select\n {{select_gb_cols}}\n sum(case when {{ column_name }} is null then 0 else 1 end) / cast(count(*) as numeric) as not_null_proportion\n from {{ model }}\n {{groupby_gb_cols}}\n),\nvalidation_errors as (\n select\n {{select_gb_cols}}\n not_null_proportion\n from validation\n where not_null_proportion < {{ at_least }} or not_null_proportion > {{ at_most }}\n)\nselect\n *\nfrom validation_errors\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.389201, + "supported_languages": null + }, + "macro.dbt_utils.test_sequential_values": { + "name": "test_sequential_values", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/sequential_values.sql", + "original_file_path": "macros/generic_tests/sequential_values.sql", + "unique_id": "macro.dbt_utils.test_sequential_values", + "macro_sql": "{% test sequential_values(model, column_name, interval=1, datepart=None, group_by_columns = []) %}\n\n {{ return(adapter.dispatch('test_sequential_values', 'dbt_utils')(model, column_name, interval, datepart, group_by_columns)) }}\n\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_sequential_values"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.390322, + "supported_languages": null + }, + "macro.dbt_utils.default__test_sequential_values": { + "name": "default__test_sequential_values", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/sequential_values.sql", + "original_file_path": "macros/generic_tests/sequential_values.sql", + 
"unique_id": "macro.dbt_utils.default__test_sequential_values", + "macro_sql": "{% macro default__test_sequential_values(model, column_name, interval=1, datepart=None, group_by_columns = []) %}\n\n{% set previous_column_name = \"previous_\" ~ dbt_utils.slugify(column_name) %}\n\n{% if group_by_columns|length() > 0 %}\n {% set select_gb_cols = group_by_columns|join(',') + ', ' %}\n {% set partition_gb_cols = 'partition by ' + group_by_columns|join(',') %}\n{% endif %}\n\nwith windowed as (\n\n select\n {{ select_gb_cols }}\n {{ column_name }},\n lag({{ column_name }}) over (\n {{partition_gb_cols}}\n order by {{ column_name }}\n ) as {{ previous_column_name }}\n from {{ model }}\n),\n\nvalidation_errors as (\n select\n *\n from windowed\n {% if datepart %}\n where not(cast({{ column_name }} as {{ dbt.type_timestamp() }})= cast({{ dbt.dateadd(datepart, interval, previous_column_name) }} as {{ dbt.type_timestamp() }}))\n {% else %}\n where not({{ column_name }} = {{ previous_column_name }} + {{ interval }})\n {% endif %}\n)\n\nselect *\nfrom validation_errors\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_utils.slugify", + "macro.dbt.type_timestamp", + "macro.dbt.dateadd" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.3915038, + "supported_languages": null + }, + "macro.dbt_utils.test_equality": { + "name": "test_equality", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/equality.sql", + "original_file_path": "macros/generic_tests/equality.sql", + "unique_id": "macro.dbt_utils.test_equality", + "macro_sql": "{% test equality(model, compare_model, compare_columns=None) %}\n {{ return(adapter.dispatch('test_equality', 'dbt_utils')(model, compare_model, compare_columns)) }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_utils.default__test_equality"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.392388, + "supported_languages": null + }, + "macro.dbt_utils.default__test_equality": { + "name": "default__test_equality", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/equality.sql", + "original_file_path": "macros/generic_tests/equality.sql", + "unique_id": "macro.dbt_utils.default__test_equality", + "macro_sql": "{% macro default__test_equality(model, compare_model, compare_columns=None) %}\n\n{% set set_diff %}\n count(*) + coalesce(abs(\n sum(case when which_diff = 'a_minus_b' then 1 else 0 end) -\n sum(case when which_diff = 'b_minus_a' then 1 else 0 end)\n ), 0)\n{% endset %}\n\n{#-- Needs to be set at parse time, before we return '' below --#}\n{{ config(fail_calc = set_diff) }}\n\n{#-- Prevent querying of db in parsing mode. This works because this macro does not create any new refs. 
#}\n{%- if not execute -%}\n {{ return('') }}\n{% endif %}\n\n-- setup\n{%- do dbt_utils._is_relation(model, 'test_equality') -%}\n\n{#-\nIf the compare_cols arg is provided, we can run this test without querying the\ninformation schema\u00a0\u2014 this allows the model to be an ephemeral model\n-#}\n\n{%- if not compare_columns -%}\n {%- do dbt_utils._is_ephemeral(model, 'test_equality') -%}\n {%- set compare_columns = adapter.get_columns_in_relation(model) | map(attribute='quoted') -%}\n{%- endif -%}\n\n{% set compare_cols_csv = compare_columns | join(', ') %}\n\nwith a as (\n\n select * from {{ model }}\n\n),\n\nb as (\n\n select * from {{ compare_model }}\n\n),\n\na_minus_b as (\n\n select {{compare_cols_csv}} from a\n {{ dbt.except() }}\n select {{compare_cols_csv}} from b\n\n),\n\nb_minus_a as (\n\n select {{compare_cols_csv}} from b\n {{ dbt.except() }}\n select {{compare_cols_csv}} from a\n\n),\n\nunioned as (\n\n select 'a_minus_b' as which_diff, a_minus_b.* from a_minus_b\n union all\n select 'b_minus_a' as which_diff, b_minus_a.* from b_minus_a\n\n)\n\nselect * from unioned\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_utils._is_relation", + "macro.dbt_utils._is_ephemeral", + "macro.dbt.except" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.394152, + "supported_languages": null + }, + "macro.dbt_utils.test_not_empty_string": { + "name": "test_not_empty_string", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/not_empty_string.sql", + "original_file_path": "macros/generic_tests/not_empty_string.sql", + "unique_id": "macro.dbt_utils.test_not_empty_string", + "macro_sql": "{% test not_empty_string(model, column_name, trim_whitespace=true) %}\n\n {{ return(adapter.dispatch('test_not_empty_string', 'dbt_utils')(model, column_name, trim_whitespace)) }}\n\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_not_empty_string"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.39478, + "supported_languages": null + }, + "macro.dbt_utils.default__test_not_empty_string": { + "name": "default__test_not_empty_string", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/not_empty_string.sql", + "original_file_path": "macros/generic_tests/not_empty_string.sql", + "unique_id": "macro.dbt_utils.default__test_not_empty_string", + "macro_sql": "{% macro default__test_not_empty_string(model, column_name, trim_whitespace=true) %}\n\n with\n \n all_values as (\n\n select \n\n\n {% if trim_whitespace == true -%}\n\n trim({{ column_name }}) as {{ column_name }}\n\n {%- else -%}\n\n {{ column_name }}\n\n {%- endif %}\n \n from {{ model }}\n\n ),\n\n errors as (\n\n select * from all_values\n where {{ column_name }} = ''\n\n )\n\n select * from errors\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.395126, + "supported_languages": null + }, + "macro.dbt_utils.test_mutually_exclusive_ranges": { + "name": "test_mutually_exclusive_ranges", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/mutually_exclusive_ranges.sql", + "original_file_path": 
"macros/generic_tests/mutually_exclusive_ranges.sql", + "unique_id": "macro.dbt_utils.test_mutually_exclusive_ranges", + "macro_sql": "{% test mutually_exclusive_ranges(model, lower_bound_column, upper_bound_column, partition_by=None, gaps='allowed', zero_length_range_allowed=False) %}\n {{ return(adapter.dispatch('test_mutually_exclusive_ranges', 'dbt_utils')(model, lower_bound_column, upper_bound_column, partition_by, gaps, zero_length_range_allowed)) }}\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__test_mutually_exclusive_ranges"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.398968, + "supported_languages": null + }, + "macro.dbt_utils.default__test_mutually_exclusive_ranges": { + "name": "default__test_mutually_exclusive_ranges", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/generic_tests/mutually_exclusive_ranges.sql", + "original_file_path": "macros/generic_tests/mutually_exclusive_ranges.sql", + "unique_id": "macro.dbt_utils.default__test_mutually_exclusive_ranges", + "macro_sql": "{% macro default__test_mutually_exclusive_ranges(model, lower_bound_column, upper_bound_column, partition_by=None, gaps='allowed', zero_length_range_allowed=False) %}\n{% if gaps == 'not_allowed' %}\n {% set allow_gaps_operator='=' %}\n {% set allow_gaps_operator_in_words='equal_to' %}\n{% elif gaps == 'allowed' %}\n {% set allow_gaps_operator='<=' %}\n {% set allow_gaps_operator_in_words='less_than_or_equal_to' %}\n{% elif gaps == 'required' %}\n {% set allow_gaps_operator='<' %}\n {% set allow_gaps_operator_in_words='less_than' %}\n{% else %}\n {{ exceptions.raise_compiler_error(\n \"`gaps` argument for mutually_exclusive_ranges test must be one of ['not_allowed', 'allowed', 'required'] Got: '\" ~ gaps ~\"'.'\"\n ) }}\n{% endif %}\n{% if not zero_length_range_allowed %}\n {% set allow_zero_length_operator='<' %}\n {% set allow_zero_length_operator_in_words='less_than' %}\n{% elif zero_length_range_allowed %}\n {% set allow_zero_length_operator='<=' %}\n {% set allow_zero_length_operator_in_words='less_than_or_equal_to' %}\n{% else %}\n {{ exceptions.raise_compiler_error(\n \"`zero_length_range_allowed` argument for mutually_exclusive_ranges test must be one of [true, false] Got: '\" ~ zero_length_range_allowed ~\"'.'\"\n ) }}\n{% endif %}\n\n{% set partition_clause=\"partition by \" ~ partition_by if partition_by else '' %}\n\nwith window_functions as (\n\n select\n {% if partition_by %}\n {{ partition_by }} as partition_by_col,\n {% endif %}\n {{ lower_bound_column }} as lower_bound,\n {{ upper_bound_column }} as upper_bound,\n\n lead({{ lower_bound_column }}) over (\n {{ partition_clause }}\n order by {{ lower_bound_column }}, {{ upper_bound_column }}\n ) as next_lower_bound,\n\n row_number() over (\n {{ partition_clause }}\n order by {{ lower_bound_column }} desc, {{ upper_bound_column }} desc\n ) = 1 as is_last_record\n\n from {{ model }}\n\n),\n\ncalc as (\n -- We want to return records where one of our assumptions fails, so we'll use\n -- the `not` function with `and` statements so we can write our assumptions more cleanly\n select\n *,\n\n -- For each record: lower_bound should be < upper_bound.\n -- Coalesce it to return an error on the null case (implicit assumption\n -- these columns are not_null)\n coalesce(\n lower_bound {{ allow_zero_length_operator }} upper_bound,\n false\n ) as lower_bound_{{ 
allow_zero_length_operator_in_words }}_upper_bound,\n\n -- For each record: upper_bound {{ allow_gaps_operator }} the next lower_bound.\n -- Coalesce it to handle null cases for the last record.\n coalesce(\n upper_bound {{ allow_gaps_operator }} next_lower_bound,\n is_last_record,\n false\n ) as upper_bound_{{ allow_gaps_operator_in_words }}_next_lower_bound\n\n from window_functions\n\n),\n\nvalidation_errors as (\n\n select\n *\n from calc\n\n where not(\n -- THE FOLLOWING SHOULD BE TRUE --\n lower_bound_{{ allow_zero_length_operator_in_words }}_upper_bound\n and upper_bound_{{ allow_gaps_operator_in_words }}_next_lower_bound\n )\n)\n\nselect * from validation_errors\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.400824, + "supported_languages": null + }, + "macro.dbt_utils.pretty_log_format": { + "name": "pretty_log_format", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/jinja_helpers/pretty_log_format.sql", + "original_file_path": "macros/jinja_helpers/pretty_log_format.sql", + "unique_id": "macro.dbt_utils.pretty_log_format", + "macro_sql": "{% macro pretty_log_format(message) %}\n {{ return(adapter.dispatch('pretty_log_format', 'dbt_utils')(message)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__pretty_log_format"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.40131, + "supported_languages": null + }, + "macro.dbt_utils.default__pretty_log_format": { + "name": "default__pretty_log_format", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/jinja_helpers/pretty_log_format.sql", + "original_file_path": "macros/jinja_helpers/pretty_log_format.sql", + "unique_id": "macro.dbt_utils.default__pretty_log_format", + "macro_sql": "{% macro default__pretty_log_format(message) %}\n {{ return( dbt_utils.pretty_time() ~ ' + ' ~ message) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.pretty_time"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.401498, + "supported_languages": null + }, + "macro.dbt_utils._is_relation": { + "name": "_is_relation", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/jinja_helpers/_is_relation.sql", + "original_file_path": "macros/jinja_helpers/_is_relation.sql", + "unique_id": "macro.dbt_utils._is_relation", + "macro_sql": "{% macro _is_relation(obj, macro) %}\n {%- if not (obj is mapping and obj.get('metadata', {}).get('type', '').endswith('Relation')) -%}\n {%- do exceptions.raise_compiler_error(\"Macro \" ~ macro ~ \" expected a Relation but received the value: \" ~ obj) -%}\n {%- endif -%}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4019809, + "supported_languages": null + }, + "macro.dbt_utils.pretty_time": { + "name": "pretty_time", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/jinja_helpers/pretty_time.sql", + "original_file_path": "macros/jinja_helpers/pretty_time.sql", + "unique_id": "macro.dbt_utils.pretty_time", + "macro_sql": "{% macro pretty_time(format='%H:%M:%S') %}\n 
{{ return(adapter.dispatch('pretty_time', 'dbt_utils')(format)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__pretty_time"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.402279, + "supported_languages": null + }, + "macro.dbt_utils.default__pretty_time": { + "name": "default__pretty_time", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/jinja_helpers/pretty_time.sql", + "original_file_path": "macros/jinja_helpers/pretty_time.sql", + "unique_id": "macro.dbt_utils.default__pretty_time", + "macro_sql": "{% macro default__pretty_time(format='%H:%M:%S') %}\n {{ return(modules.datetime.datetime.now().strftime(format)) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.402482, + "supported_languages": null + }, + "macro.dbt_utils.log_info": { + "name": "log_info", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/jinja_helpers/log_info.sql", + "original_file_path": "macros/jinja_helpers/log_info.sql", + "unique_id": "macro.dbt_utils.log_info", + "macro_sql": "{% macro log_info(message) %}\n {{ return(adapter.dispatch('log_info', 'dbt_utils')(message)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__log_info"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.402744, + "supported_languages": null + }, + "macro.dbt_utils.default__log_info": { + "name": "default__log_info", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/jinja_helpers/log_info.sql", + "original_file_path": "macros/jinja_helpers/log_info.sql", + "unique_id": "macro.dbt_utils.default__log_info", + "macro_sql": "{% macro default__log_info(message) %}\n {{ log(dbt_utils.pretty_log_format(message), info=True) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.pretty_log_format"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.402921, + "supported_languages": null + }, + "macro.dbt_utils.slugify": { + "name": "slugify", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/jinja_helpers/slugify.sql", + "original_file_path": "macros/jinja_helpers/slugify.sql", + "unique_id": "macro.dbt_utils.slugify", + "macro_sql": "{% macro slugify(string) %}\n\n{#- Lower case the string -#}\n{% set string = string | lower %}\n{#- Replace spaces and dashes with underscores -#}\n{% set string = modules.re.sub('[ -]+', '_', string) %}\n{#- Only take letters, numbers, and underscores -#}\n{% set string = modules.re.sub('[^a-z0-9_]+', '', string) %}\n{#- Prepends \"_\" if string begins with a number -#}\n{% set string = modules.re.sub('^[0-9]', '_' + string[0], string) %}\n\n{{ return(string) }}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.403626, + "supported_languages": null + }, + "macro.dbt_utils._is_ephemeral": { + "name": "_is_ephemeral", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/jinja_helpers/_is_ephemeral.sql", + 
"original_file_path": "macros/jinja_helpers/_is_ephemeral.sql", + "unique_id": "macro.dbt_utils._is_ephemeral", + "macro_sql": "{% macro _is_ephemeral(obj, macro) %}\n {%- if obj.is_cte -%}\n {% set ephemeral_prefix = api.Relation.add_ephemeral_prefix('') %}\n {% if obj.name.startswith(ephemeral_prefix) %}\n {% set model_name = obj.name[(ephemeral_prefix|length):] %}\n {% else %}\n {% set model_name = obj.name %}\n {%- endif -%}\n {% set error_message %}\nThe `{{ macro }}` macro cannot be used with ephemeral models, as it relies on the information schema.\n\n`{{ model_name }}` is an ephemeral model. Consider making it a view or table instead.\n {% endset %}\n {%- do exceptions.raise_compiler_error(error_message) -%}\n {%- endif -%}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.40452, + "supported_languages": null + }, + "macro.dbt_utils.get_intervals_between": { + "name": "get_intervals_between", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/date_spine.sql", + "original_file_path": "macros/sql/date_spine.sql", + "unique_id": "macro.dbt_utils.get_intervals_between", + "macro_sql": "{% macro get_intervals_between(start_date, end_date, datepart) -%}\n {{ return(adapter.dispatch('get_intervals_between', 'dbt_utils')(start_date, end_date, datepart)) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__get_intervals_between"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.405272, + "supported_languages": null + }, + "macro.dbt_utils.default__get_intervals_between": { + "name": "default__get_intervals_between", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/date_spine.sql", + "original_file_path": "macros/sql/date_spine.sql", + "unique_id": "macro.dbt_utils.default__get_intervals_between", + "macro_sql": "{% macro default__get_intervals_between(start_date, end_date, datepart) -%}\n {%- call statement('get_intervals_between', fetch_result=True) %}\n\n select {{ dbt.datediff(start_date, end_date, datepart) }}\n\n {%- endcall -%}\n\n {%- set value_list = load_result('get_intervals_between') -%}\n\n {%- if value_list and value_list['data'] -%}\n {%- set values = value_list['data'] | map(attribute=0) | list %}\n {{ return(values[0]) }}\n {%- else -%}\n {{ return(1) }}\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement", "macro.dbt.datediff"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.40594, + "supported_languages": null + }, + "macro.dbt_utils.date_spine": { + "name": "date_spine", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/date_spine.sql", + "original_file_path": "macros/sql/date_spine.sql", + "unique_id": "macro.dbt_utils.date_spine", + "macro_sql": "{% macro date_spine(datepart, start_date, end_date) %}\n {{ return(adapter.dispatch('date_spine', 'dbt_utils')(datepart, start_date, end_date)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__date_spine"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.406213, + "supported_languages": 
null + }, + "macro.dbt_utils.default__date_spine": { + "name": "default__date_spine", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/date_spine.sql", + "original_file_path": "macros/sql/date_spine.sql", + "unique_id": "macro.dbt_utils.default__date_spine", + "macro_sql": "{% macro default__date_spine(datepart, start_date, end_date) %}\n\n\n{# call as follows:\n\ndate_spine(\n \"day\",\n \"to_date('01/01/2016', 'mm/dd/yyyy')\",\n \"dbt.dateadd(week, 1, current_date)\"\n) #}\n\n\nwith rawdata as (\n\n {{dbt_utils.generate_series(\n dbt_utils.get_intervals_between(start_date, end_date, datepart)\n )}}\n\n),\n\nall_periods as (\n\n select (\n {{\n dbt.dateadd(\n datepart,\n \"row_number() over (order by 1) - 1\",\n start_date\n )\n }}\n ) as date_{{datepart}}\n from rawdata\n\n),\n\nfiltered as (\n\n select *\n from all_periods\n where date_{{datepart}} <= {{ end_date }}\n\n)\n\nselect * from filtered\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_utils.generate_series", + "macro.dbt_utils.get_intervals_between", + "macro.dbt.dateadd" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.406624, + "supported_languages": null + }, + "macro.dbt_utils.safe_subtract": { + "name": "safe_subtract", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/safe_subtract.sql", + "original_file_path": "macros/sql/safe_subtract.sql", + "unique_id": "macro.dbt_utils.safe_subtract", + "macro_sql": "{%- macro safe_subtract(field_list) -%}\n {{ return(adapter.dispatch('safe_subtract', 'dbt_utils')(field_list)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__safe_subtract"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4070568, + "supported_languages": null + }, + "macro.dbt_utils.default__safe_subtract": { + "name": "default__safe_subtract", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/safe_subtract.sql", + "original_file_path": "macros/sql/safe_subtract.sql", + "unique_id": "macro.dbt_utils.default__safe_subtract", + "macro_sql": "\n\n{%- macro default__safe_subtract(field_list) -%}\n\n{%- if field_list is not iterable or field_list is string or field_list is mapping -%}\n\n{%- set error_message = '\nWarning: the `safe_subtract` macro takes a single list argument instead of \\\nstring arguments. The {}.{} model triggered this warning. 
\\\n'.format(model.package_name, model.name) -%}\n\n{%- do exceptions.raise_compiler_error(error_message) -%}\n\n{%- endif -%}\n\n{% set fields = [] %}\n\n{%- for field in field_list -%}\n\n {% do fields.append(\"coalesce(\" ~ field ~ \", 0)\") %}\n\n{%- endfor -%}\n\n{{ fields|join(' -\\n ') }}\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.407677, + "supported_languages": null + }, + "macro.dbt_utils.nullcheck_table": { + "name": "nullcheck_table", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/nullcheck_table.sql", + "original_file_path": "macros/sql/nullcheck_table.sql", + "unique_id": "macro.dbt_utils.nullcheck_table", + "macro_sql": "{% macro nullcheck_table(relation) %}\n {{ return(adapter.dispatch('nullcheck_table', 'dbt_utils')(relation)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__nullcheck_table"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.408017, + "supported_languages": null + }, + "macro.dbt_utils.default__nullcheck_table": { + "name": "default__nullcheck_table", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/nullcheck_table.sql", + "original_file_path": "macros/sql/nullcheck_table.sql", + "unique_id": "macro.dbt_utils.default__nullcheck_table", + "macro_sql": "{% macro default__nullcheck_table(relation) %}\n\n {%- do dbt_utils._is_relation(relation, 'nullcheck_table') -%}\n {%- do dbt_utils._is_ephemeral(relation, 'nullcheck_table') -%}\n {% set cols = adapter.get_columns_in_relation(relation) %}\n\n select {{ dbt_utils.nullcheck(cols) }}\n from {{relation}}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_utils._is_relation", + "macro.dbt_utils._is_ephemeral", + "macro.dbt_utils.nullcheck" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4083672, + "supported_languages": null + }, + "macro.dbt_utils.get_relations_by_pattern": { + "name": "get_relations_by_pattern", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_relations_by_pattern.sql", + "original_file_path": "macros/sql/get_relations_by_pattern.sql", + "unique_id": "macro.dbt_utils.get_relations_by_pattern", + "macro_sql": "{% macro get_relations_by_pattern(schema_pattern, table_pattern, exclude='', database=target.database) %}\n {{ return(adapter.dispatch('get_relations_by_pattern', 'dbt_utils')(schema_pattern, table_pattern, exclude, database)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__get_relations_by_pattern"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.409062, + "supported_languages": null + }, + "macro.dbt_utils.default__get_relations_by_pattern": { + "name": "default__get_relations_by_pattern", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_relations_by_pattern.sql", + "original_file_path": "macros/sql/get_relations_by_pattern.sql", + "unique_id": "macro.dbt_utils.default__get_relations_by_pattern", + "macro_sql": "{% macro default__get_relations_by_pattern(schema_pattern, table_pattern, exclude='', 
database=target.database) %}\n\n {%- call statement('get_tables', fetch_result=True) %}\n\n {{ dbt_utils.get_tables_by_pattern_sql(schema_pattern, table_pattern, exclude, database) }}\n\n {%- endcall -%}\n\n {%- set table_list = load_result('get_tables') -%}\n\n {%- if table_list and table_list['table'] -%}\n {%- set tbl_relations = [] -%}\n {%- for row in table_list['table'] -%}\n {%- set tbl_relation = api.Relation.create(\n database=database,\n schema=row.table_schema,\n identifier=row.table_name,\n type=row.table_type\n ) -%}\n {%- do tbl_relations.append(tbl_relation) -%}\n {%- endfor -%}\n\n {{ return(tbl_relations) }}\n {%- else -%}\n {{ return([]) }}\n {%- endif -%}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.statement", + "macro.dbt_utils.get_tables_by_pattern_sql" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4100292, + "supported_languages": null + }, + "macro.dbt_utils.get_powers_of_two": { + "name": "get_powers_of_two", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/generate_series.sql", + "original_file_path": "macros/sql/generate_series.sql", + "unique_id": "macro.dbt_utils.get_powers_of_two", + "macro_sql": "{% macro get_powers_of_two(upper_bound) %}\n {{ return(adapter.dispatch('get_powers_of_two', 'dbt_utils')(upper_bound)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__get_powers_of_two"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4111218, + "supported_languages": null + }, + "macro.dbt_utils.default__get_powers_of_two": { + "name": "default__get_powers_of_two", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/generate_series.sql", + "original_file_path": "macros/sql/generate_series.sql", + "unique_id": "macro.dbt_utils.default__get_powers_of_two", + "macro_sql": "{% macro default__get_powers_of_two(upper_bound) %}\n\n {% if upper_bound <= 0 %}\n {{ exceptions.raise_compiler_error(\"upper bound must be positive\") }}\n {% endif %}\n\n {% for _ in range(1, 100) %}\n {% if upper_bound <= 2 ** loop.index %}{{ return(loop.index) }}{% endif %}\n {% endfor %}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4118369, + "supported_languages": null + }, + "macro.dbt_utils.generate_series": { + "name": "generate_series", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/generate_series.sql", + "original_file_path": "macros/sql/generate_series.sql", + "unique_id": "macro.dbt_utils.generate_series", + "macro_sql": "{% macro generate_series(upper_bound) %}\n {{ return(adapter.dispatch('generate_series', 'dbt_utils')(upper_bound)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__generate_series"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4121149, + "supported_languages": null + }, + "macro.dbt_utils.default__generate_series": { + "name": "default__generate_series", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/generate_series.sql", + "original_file_path": "macros/sql/generate_series.sql", + 
"unique_id": "macro.dbt_utils.default__generate_series", + "macro_sql": "{% macro default__generate_series(upper_bound) %}\n\n {% set n = dbt_utils.get_powers_of_two(upper_bound) %}\n\n with p as (\n select 0 as generated_number union all select 1\n ), unioned as (\n\n select\n\n {% for i in range(n) %}\n p{{i}}.generated_number * power(2, {{i}})\n {% if not loop.last %} + {% endif %}\n {% endfor %}\n + 1\n as generated_number\n\n from\n\n {% for i in range(n) %}\n p as p{{i}}\n {% if not loop.last %} cross join {% endif %}\n {% endfor %}\n\n )\n\n select *\n from unioned\n where generated_number <= {{upper_bound}}\n order by generated_number\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.get_powers_of_two"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.412717, + "supported_languages": null + }, + "macro.dbt_utils.get_relations_by_prefix": { + "name": "get_relations_by_prefix", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_relations_by_prefix.sql", + "original_file_path": "macros/sql/get_relations_by_prefix.sql", + "unique_id": "macro.dbt_utils.get_relations_by_prefix", + "macro_sql": "{% macro get_relations_by_prefix(schema, prefix, exclude='', database=target.database) %}\n {{ return(adapter.dispatch('get_relations_by_prefix', 'dbt_utils')(schema, prefix, exclude, database)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__get_relations_by_prefix"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.413507, + "supported_languages": null + }, + "macro.dbt_utils.default__get_relations_by_prefix": { + "name": "default__get_relations_by_prefix", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_relations_by_prefix.sql", + "original_file_path": "macros/sql/get_relations_by_prefix.sql", + "unique_id": "macro.dbt_utils.default__get_relations_by_prefix", + "macro_sql": "{% macro default__get_relations_by_prefix(schema, prefix, exclude='', database=target.database) %}\n\n {%- call statement('get_tables', fetch_result=True) %}\n\n {{ dbt_utils.get_tables_by_prefix_sql(schema, prefix, exclude, database) }}\n\n {%- endcall -%}\n\n {%- set table_list = load_result('get_tables') -%}\n\n {%- if table_list and table_list['table'] -%}\n {%- set tbl_relations = [] -%}\n {%- for row in table_list['table'] -%}\n {%- set tbl_relation = api.Relation.create(\n database=database,\n schema=row.table_schema,\n identifier=row.table_name,\n type=row.table_type\n ) -%}\n {%- do tbl_relations.append(tbl_relation) -%}\n {%- endfor -%}\n\n {{ return(tbl_relations) }}\n {%- else -%}\n {{ return([]) }}\n {%- endif -%}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.statement", + "macro.dbt_utils.get_tables_by_prefix_sql" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.414486, + "supported_languages": null + }, + "macro.dbt_utils.get_tables_by_prefix_sql": { + "name": "get_tables_by_prefix_sql", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_tables_by_prefix_sql.sql", + "original_file_path": "macros/sql/get_tables_by_prefix_sql.sql", + "unique_id": "macro.dbt_utils.get_tables_by_prefix_sql", + "macro_sql": "{% macro 
get_tables_by_prefix_sql(schema, prefix, exclude='', database=target.database) %}\n {{ return(adapter.dispatch('get_tables_by_prefix_sql', 'dbt_utils')(schema, prefix, exclude, database)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__get_tables_by_prefix_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.414995, + "supported_languages": null + }, + "macro.dbt_utils.default__get_tables_by_prefix_sql": { + "name": "default__get_tables_by_prefix_sql", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_tables_by_prefix_sql.sql", + "original_file_path": "macros/sql/get_tables_by_prefix_sql.sql", + "unique_id": "macro.dbt_utils.default__get_tables_by_prefix_sql", + "macro_sql": "{% macro default__get_tables_by_prefix_sql(schema, prefix, exclude='', database=target.database) %}\n\n {{ dbt_utils.get_tables_by_pattern_sql(\n schema_pattern = schema,\n table_pattern = prefix ~ '%',\n exclude = exclude,\n database = database\n ) }}\n \n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.get_tables_by_pattern_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4153311, + "supported_languages": null + }, + "macro.dbt_utils.star": { + "name": "star", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/star.sql", + "original_file_path": "macros/sql/star.sql", + "unique_id": "macro.dbt_utils.star", + "macro_sql": "{% macro star(from, relation_alias=False, except=[], prefix='', suffix='', quote_identifiers=True) -%}\r\n {{ return(adapter.dispatch('star', 'dbt_utils')(from, relation_alias, except, prefix, suffix, quote_identifiers)) }}\r\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__star"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4166899, + "supported_languages": null + }, + "macro.dbt_utils.default__star": { + "name": "default__star", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/star.sql", + "original_file_path": "macros/sql/star.sql", + "unique_id": "macro.dbt_utils.default__star", + "macro_sql": "{% macro default__star(from, relation_alias=False, except=[], prefix='', suffix='', quote_identifiers=True) -%}\r\n {%- do dbt_utils._is_relation(from, 'star') -%}\r\n {%- do dbt_utils._is_ephemeral(from, 'star') -%}\r\n\r\n {#-- Prevent querying of db in parsing mode. This works because this macro does not create any new refs. #}\r\n {%- if not execute -%}\r\n {% do return('*') %}\r\n {%- endif -%}\r\n\r\n {% set cols = dbt_utils.get_filtered_columns_in_relation(from, except) %}\r\n\r\n {%- if cols|length <= 0 -%}\r\n {% if flags.WHICH == 'compile' %}\r\n {% set response %}\r\n*\r\n/* No columns were returned. Maybe the relation doesn't exist yet \r\nor all columns were excluded. This star is only output during \r\ndbt compile, and exists to keep SQLFluff happy. 
*/\r\n {% endset %}\r\n {% do return(response) %}\r\n {% else %}\r\n {% do return(\"/* no columns returned from star() macro */\") %}\r\n {% endif %}\r\n {%- else -%}\r\n {%- for col in cols %}\r\n {%- if relation_alias %}{{ relation_alias }}.{% else %}{%- endif -%}\r\n {%- if quote_identifiers -%}\r\n {{ adapter.quote(col)|trim }} {%- if prefix!='' or suffix!='' %} as {{ adapter.quote(prefix ~ col ~ suffix)|trim }} {%- endif -%}\r\n {%- else -%}\r\n {{ col|trim }} {%- if prefix!='' or suffix!='' %} as {{ (prefix ~ col ~ suffix)|trim }} {%- endif -%}\r\n {% endif %}\r\n {%- if not loop.last %},{{ '\\n ' }}{%- endif -%}\r\n {%- endfor -%}\r\n {% endif %}\r\n{%- endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_utils._is_relation", + "macro.dbt_utils._is_ephemeral", + "macro.dbt_utils.get_filtered_columns_in_relation" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4182432, + "supported_languages": null + }, + "macro.dbt_utils.unpivot": { + "name": "unpivot", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/unpivot.sql", + "original_file_path": "macros/sql/unpivot.sql", + "unique_id": "macro.dbt_utils.unpivot", + "macro_sql": "{% macro unpivot(relation=none, cast_to='varchar', exclude=none, remove=none, field_name='field_name', value_name='value') -%}\n {{ return(adapter.dispatch('unpivot', 'dbt_utils')(relation, cast_to, exclude, remove, field_name, value_name)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__unpivot"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.419752, + "supported_languages": null + }, + "macro.dbt_utils.default__unpivot": { + "name": "default__unpivot", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/unpivot.sql", + "original_file_path": "macros/sql/unpivot.sql", + "unique_id": "macro.dbt_utils.default__unpivot", + "macro_sql": "{% macro default__unpivot(relation=none, cast_to='varchar', exclude=none, remove=none, field_name='field_name', value_name='value') -%}\n\n {% if not relation %}\n {{ exceptions.raise_compiler_error(\"Error: argument `relation` is required for `unpivot` macro.\") }}\n {% endif %}\n\n {%- set exclude = exclude if exclude is not none else [] %}\n {%- set remove = remove if remove is not none else [] %}\n\n {%- set include_cols = [] %}\n\n {%- set table_columns = {} %}\n\n {%- do table_columns.update({relation: []}) %}\n\n {%- do dbt_utils._is_relation(relation, 'unpivot') -%}\n {%- do dbt_utils._is_ephemeral(relation, 'unpivot') -%}\n {%- set cols = adapter.get_columns_in_relation(relation) %}\n\n {%- for col in cols -%}\n {%- if col.column.lower() not in remove|map('lower') and col.column.lower() not in exclude|map('lower') -%}\n {% do include_cols.append(col) %}\n {%- endif %}\n {%- endfor %}\n\n\n {%- for col in include_cols -%}\n select\n {%- for exclude_col in exclude %}\n {{ exclude_col }},\n {%- endfor %}\n\n cast('{{ col.column }}' as {{ dbt.type_string() }}) as {{ field_name }},\n cast( {% if col.data_type == 'boolean' %}\n {{ dbt.cast_bool_to_text(col.column) }}\n {% else %}\n {{ col.column }}\n {% endif %}\n as {{ cast_to }}) as {{ value_name }}\n\n from {{ relation }}\n\n {% if not loop.last -%}\n union all\n {% endif -%}\n {%- endfor -%}\n\n{%- endmacro %}", + "depends_on": { + "macros": [ + 
"macro.dbt_utils._is_relation", + "macro.dbt_utils._is_ephemeral", + "macro.dbt.type_string", + "macro.dbt.cast_bool_to_text" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4216268, + "supported_languages": null + }, + "macro.dbt_utils.safe_divide": { + "name": "safe_divide", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/safe_divide.sql", + "original_file_path": "macros/sql/safe_divide.sql", + "unique_id": "macro.dbt_utils.safe_divide", + "macro_sql": "{% macro safe_divide(numerator, denominator) -%}\n {{ return(adapter.dispatch('safe_divide', 'dbt_utils')(numerator, denominator)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__safe_divide"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.421997, + "supported_languages": null + }, + "macro.dbt_utils.default__safe_divide": { + "name": "default__safe_divide", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/safe_divide.sql", + "original_file_path": "macros/sql/safe_divide.sql", + "unique_id": "macro.dbt_utils.default__safe_divide", + "macro_sql": "{% macro default__safe_divide(numerator, denominator) %}\n ( {{ numerator }} ) / nullif( ( {{ denominator }} ), 0)\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4221492, + "supported_languages": null + }, + "macro.dbt_utils.union_relations": { + "name": "union_relations", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/union.sql", + "original_file_path": "macros/sql/union.sql", + "unique_id": "macro.dbt_utils.union_relations", + "macro_sql": "{%- macro union_relations(relations, column_override=none, include=[], exclude=[], source_column_name='_dbt_source_relation', where=none) -%}\n {{ return(adapter.dispatch('union_relations', 'dbt_utils')(relations, column_override, include, exclude, source_column_name, where)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__union_relations"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.425869, + "supported_languages": null + }, + "macro.dbt_utils.default__union_relations": { + "name": "default__union_relations", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/union.sql", + "original_file_path": "macros/sql/union.sql", + "unique_id": "macro.dbt_utils.default__union_relations", + "macro_sql": "\n\n{%- macro default__union_relations(relations, column_override=none, include=[], exclude=[], source_column_name='_dbt_source_relation', where=none) -%}\n\n {%- if exclude and include -%}\n {{ exceptions.raise_compiler_error(\"Both an exclude and include list were provided to the `union` macro. Only one is allowed\") }}\n {%- endif -%}\n\n {#-- Prevent querying of db in parsing mode. This works because this macro does not create any new refs. 
-#}\n {%- if not execute %}\n {{ return('') }}\n {% endif -%}\n\n {%- set column_override = column_override if column_override is not none else {} -%}\n\n {%- set relation_columns = {} -%}\n {%- set column_superset = {} -%}\n {%- set all_excludes = [] -%}\n {%- set all_includes = [] -%}\n\n {%- if exclude -%}\n {%- for exc in exclude -%}\n {%- do all_excludes.append(exc | lower) -%}\n {%- endfor -%}\n {%- endif -%}\n\n {%- if include -%}\n {%- for inc in include -%}\n {%- do all_includes.append(inc | lower) -%}\n {%- endfor -%}\n {%- endif -%}\n\n {%- for relation in relations -%}\n\n {%- do relation_columns.update({relation: []}) -%}\n\n {%- do dbt_utils._is_relation(relation, 'union_relations') -%}\n {%- do dbt_utils._is_ephemeral(relation, 'union_relations') -%}\n {%- set cols = adapter.get_columns_in_relation(relation) -%}\n {%- for col in cols -%}\n\n {#- If an exclude list was provided and the column is in the list, do nothing -#}\n {%- if exclude and col.column | lower in all_excludes -%}\n\n {#- If an include list was provided and the column is not in the list, do nothing -#}\n {%- elif include and col.column | lower not in all_includes -%}\n\n {#- Otherwise add the column to the column superset -#}\n {%- else -%}\n\n {#- update the list of columns in this relation -#}\n {%- do relation_columns[relation].append(col.column) -%}\n\n {%- if col.column in column_superset -%}\n\n {%- set stored = column_superset[col.column] -%}\n {%- if col.is_string() and stored.is_string() and col.string_size() > stored.string_size() -%}\n\n {%- do column_superset.update({col.column: col}) -%}\n\n {%- endif %}\n\n {%- else -%}\n\n {%- do column_superset.update({col.column: col}) -%}\n\n {%- endif -%}\n\n {%- endif -%}\n\n {%- endfor -%}\n {%- endfor -%}\n\n {%- set ordered_column_names = column_superset.keys() -%}\n {%- set dbt_command = flags.WHICH -%}\n\n\n {% if dbt_command in ['run', 'build'] %}\n {% if (include | length > 0 or exclude | length > 0) and not column_superset.keys() %}\n {%- set relations_string -%}\n {%- for relation in relations -%}\n {{ relation.name }}\n {%- if not loop.last %}, {% endif -%}\n {%- endfor -%}\n {%- endset -%}\n\n {%- set error_message -%}\n There were no columns found to union for relations {{ relations_string }}\n {%- endset -%}\n\n {{ exceptions.raise_compiler_error(error_message) }}\n {%- endif -%}\n {%- endif -%}\n\n {%- for relation in relations %}\n\n (\n select\n\n {%- if source_column_name is not none %}\n cast({{ dbt.string_literal(relation) }} as {{ dbt.type_string() }}) as {{ source_column_name }},\n {%- endif %}\n\n {% for col_name in ordered_column_names -%}\n\n {%- set col = column_superset[col_name] %}\n {%- set col_type = column_override.get(col.column, col.data_type) %}\n {%- set col_name = adapter.quote(col_name) if col_name in relation_columns[relation] else 'null' %}\n cast({{ col_name }} as {{ col_type }}) as {{ col.quoted }} {% if not loop.last %},{% endif -%}\n\n {%- endfor %}\n\n from {{ relation }}\n\n {% if where -%}\n where {{ where }}\n {%- endif %}\n )\n\n {% if not loop.last -%}\n union all\n {% endif -%}\n\n {%- endfor -%}\n\n{%- endmacro -%}", + "depends_on": { + "macros": [ + "macro.dbt_utils._is_relation", + "macro.dbt_utils._is_ephemeral", + "macro.dbt.string_literal", + "macro.dbt.type_string" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.429781, + "supported_languages": null + }, + "macro.dbt_utils.group_by": { + 
"name": "group_by", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/groupby.sql", + "original_file_path": "macros/sql/groupby.sql", + "unique_id": "macro.dbt_utils.group_by", + "macro_sql": "{%- macro group_by(n) -%}\n {{ return(adapter.dispatch('group_by', 'dbt_utils')(n)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__group_by"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.430167, + "supported_languages": null + }, + "macro.dbt_utils.default__group_by": { + "name": "default__group_by", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/groupby.sql", + "original_file_path": "macros/sql/groupby.sql", + "unique_id": "macro.dbt_utils.default__group_by", + "macro_sql": "\n\n{%- macro default__group_by(n) -%}\n\n group by {% for i in range(1, n + 1) -%}\n {{ i }}{{ ',' if not loop.last }} \n {%- endfor -%}\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.430441, + "supported_languages": null + }, + "macro.dbt_utils.deduplicate": { + "name": "deduplicate", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/deduplicate.sql", + "original_file_path": "macros/sql/deduplicate.sql", + "unique_id": "macro.dbt_utils.deduplicate", + "macro_sql": "{%- macro deduplicate(relation, partition_by, order_by) -%}\n {{ return(adapter.dispatch('deduplicate', 'dbt_utils')(relation, partition_by, order_by)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.snowflake__deduplicate"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.431345, + "supported_languages": null + }, + "macro.dbt_utils.default__deduplicate": { + "name": "default__deduplicate", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/deduplicate.sql", + "original_file_path": "macros/sql/deduplicate.sql", + "unique_id": "macro.dbt_utils.default__deduplicate", + "macro_sql": "\n\n{%- macro default__deduplicate(relation, partition_by, order_by) -%}\n\n with row_numbered as (\n select\n _inner.*,\n row_number() over (\n partition by {{ partition_by }}\n order by {{ order_by }}\n ) as rn\n from {{ relation }} as _inner\n )\n\n select\n distinct data.*\n from {{ relation }} as data\n {#\n -- Not all DBs will support natural joins but the ones that do include:\n -- Oracle, MySQL, SQLite, Redshift, Teradata, Materialize, Databricks\n -- Apache Spark, SingleStore, Vertica\n -- Those that do not appear to support natural joins include:\n -- SQLServer, Trino, Presto, Rockset, Athena\n #}\n natural join row_numbered\n where row_numbered.rn = 1\n\n{%- endmacro -%}\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4316711, + "supported_languages": null + }, + "macro.dbt_utils.redshift__deduplicate": { + "name": "redshift__deduplicate", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/deduplicate.sql", + "original_file_path": "macros/sql/deduplicate.sql", + "unique_id": "macro.dbt_utils.redshift__deduplicate", + "macro_sql": "{% macro redshift__deduplicate(relation, 
partition_by, order_by) -%}\n\n {{ return(dbt_utils.default__deduplicate(relation, partition_by, order_by=order_by)) }}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__deduplicate"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.431958, + "supported_languages": null + }, + "macro.dbt_utils.postgres__deduplicate": { + "name": "postgres__deduplicate", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/deduplicate.sql", + "original_file_path": "macros/sql/deduplicate.sql", + "unique_id": "macro.dbt_utils.postgres__deduplicate", + "macro_sql": "\n{%- macro postgres__deduplicate(relation, partition_by, order_by) -%}\n\n select\n distinct on ({{ partition_by }}) *\n from {{ relation }}\n order by {{ partition_by }}{{ ',' ~ order_by }}\n\n{%- endmacro -%}\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.432183, + "supported_languages": null + }, + "macro.dbt_utils.snowflake__deduplicate": { + "name": "snowflake__deduplicate", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/deduplicate.sql", + "original_file_path": "macros/sql/deduplicate.sql", + "unique_id": "macro.dbt_utils.snowflake__deduplicate", + "macro_sql": "\n{%- macro snowflake__deduplicate(relation, partition_by, order_by) -%}\n\n select *\n from {{ relation }}\n qualify\n row_number() over (\n partition by {{ partition_by }}\n order by {{ order_by }}\n ) = 1\n\n{%- endmacro -%}\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.43237, + "supported_languages": null + }, + "macro.dbt_utils.bigquery__deduplicate": { + "name": "bigquery__deduplicate", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/deduplicate.sql", + "original_file_path": "macros/sql/deduplicate.sql", + "unique_id": "macro.dbt_utils.bigquery__deduplicate", + "macro_sql": "\n{%- macro bigquery__deduplicate(relation, partition_by, order_by) -%}\n\n select unique.*\n from (\n select\n array_agg (\n original\n order by {{ order_by }}\n limit 1\n )[offset(0)] unique\n from {{ relation }} original\n group by {{ partition_by }}\n )\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.432553, + "supported_languages": null + }, + "macro.dbt_utils.surrogate_key": { + "name": "surrogate_key", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/surrogate_key.sql", + "original_file_path": "macros/sql/surrogate_key.sql", + "unique_id": "macro.dbt_utils.surrogate_key", + "macro_sql": "{%- macro surrogate_key(field_list) -%}\n {% set frustrating_jinja_feature = varargs %}\n {{ return(adapter.dispatch('surrogate_key', 'dbt_utils')(field_list, *varargs)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__surrogate_key"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.432987, + "supported_languages": null + }, + "macro.dbt_utils.default__surrogate_key": { + "name": 
"default__surrogate_key", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/surrogate_key.sql", + "original_file_path": "macros/sql/surrogate_key.sql", + "unique_id": "macro.dbt_utils.default__surrogate_key", + "macro_sql": "\n\n{%- macro default__surrogate_key(field_list) -%}\n\n{%- set error_message = '\nWarning: `dbt_utils.surrogate_key` has been replaced by \\\n`dbt_utils.generate_surrogate_key`. The new macro treats null values \\\ndifferently to empty strings. To restore the behaviour of the original \\\nmacro, add a global variable in dbt_project.yml called \\\n`surrogate_key_treat_nulls_as_empty_strings` to your \\\ndbt_project.yml file with a value of True. \\\nThe {}.{} model triggered this warning. \\\n'.format(model.package_name, model.name) -%}\n\n{%- do exceptions.raise_compiler_error(error_message) -%}\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.433248, + "supported_languages": null + }, + "macro.dbt_utils.safe_add": { + "name": "safe_add", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/safe_add.sql", + "original_file_path": "macros/sql/safe_add.sql", + "unique_id": "macro.dbt_utils.safe_add", + "macro_sql": "{%- macro safe_add(field_list) -%}\n {{ return(adapter.dispatch('safe_add', 'dbt_utils')(field_list)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__safe_add"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.433641, + "supported_languages": null + }, + "macro.dbt_utils.default__safe_add": { + "name": "default__safe_add", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/safe_add.sql", + "original_file_path": "macros/sql/safe_add.sql", + "unique_id": "macro.dbt_utils.default__safe_add", + "macro_sql": "\n\n{%- macro default__safe_add(field_list) -%}\n\n{%- if field_list is not iterable or field_list is string or field_list is mapping -%}\n\n{%- set error_message = '\nWarning: the `safe_add` macro now takes a single list argument instead of \\\nstring arguments. The {}.{} model triggered this warning. 
\\\n'.format(model.package_name, model.name) -%}\n\n{%- do exceptions.warn(error_message) -%}\n\n{%- endif -%}\n\n{% set fields = [] %}\n\n{%- for field in field_list -%}\n\n {% do fields.append(\"coalesce(\" ~ field ~ \", 0)\") %}\n\n{%- endfor -%}\n\n{{ fields|join(' +\\n ') }}\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.434238, + "supported_languages": null + }, + "macro.dbt_utils.nullcheck": { + "name": "nullcheck", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/nullcheck.sql", + "original_file_path": "macros/sql/nullcheck.sql", + "unique_id": "macro.dbt_utils.nullcheck", + "macro_sql": "{% macro nullcheck(cols) %}\n {{ return(adapter.dispatch('nullcheck', 'dbt_utils')(cols)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__nullcheck"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.434622, + "supported_languages": null + }, + "macro.dbt_utils.default__nullcheck": { + "name": "default__nullcheck", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/nullcheck.sql", + "original_file_path": "macros/sql/nullcheck.sql", + "unique_id": "macro.dbt_utils.default__nullcheck", + "macro_sql": "{% macro default__nullcheck(cols) %}\n{%- for col in cols %}\n\n {% if col.is_string() -%}\n\n nullif({{col.name}},'') as {{col.name}}\n\n {%- else -%}\n\n {{col.name}}\n\n {%- endif -%}\n\n{%- if not loop.last -%} , {%- endif -%}\n\n{%- endfor -%}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.434985, + "supported_languages": null + }, + "macro.dbt_utils.get_tables_by_pattern_sql": { + "name": "get_tables_by_pattern_sql", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_tables_by_pattern_sql.sql", + "original_file_path": "macros/sql/get_tables_by_pattern_sql.sql", + "unique_id": "macro.dbt_utils.get_tables_by_pattern_sql", + "macro_sql": "{% macro get_tables_by_pattern_sql(schema_pattern, table_pattern, exclude='', database=target.database) %}\n {{ return(adapter.dispatch('get_tables_by_pattern_sql', 'dbt_utils')\n (schema_pattern, table_pattern, exclude, database)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__get_tables_by_pattern_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.436805, + "supported_languages": null + }, + "macro.dbt_utils.default__get_tables_by_pattern_sql": { + "name": "default__get_tables_by_pattern_sql", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_tables_by_pattern_sql.sql", + "original_file_path": "macros/sql/get_tables_by_pattern_sql.sql", + "unique_id": "macro.dbt_utils.default__get_tables_by_pattern_sql", + "macro_sql": "{% macro default__get_tables_by_pattern_sql(schema_pattern, table_pattern, exclude='', database=target.database) %}\n\n select distinct\n table_schema as {{ adapter.quote('table_schema') }},\n table_name as {{ adapter.quote('table_name') }},\n {{ dbt_utils.get_table_types_sql() }}\n from {{ database }}.information_schema.tables\n where 
table_schema ilike '{{ schema_pattern }}'\n and table_name ilike '{{ table_pattern }}'\n and table_name not ilike '{{ exclude }}'\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.get_table_types_sql"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.43723, + "supported_languages": null + }, + "macro.dbt_utils.bigquery__get_tables_by_pattern_sql": { + "name": "bigquery__get_tables_by_pattern_sql", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_tables_by_pattern_sql.sql", + "original_file_path": "macros/sql/get_tables_by_pattern_sql.sql", + "unique_id": "macro.dbt_utils.bigquery__get_tables_by_pattern_sql", + "macro_sql": "{% macro bigquery__get_tables_by_pattern_sql(schema_pattern, table_pattern, exclude='', database=target.database) %}\n\n {% if '%' in schema_pattern %}\n {% set schemata=dbt_utils._bigquery__get_matching_schemata(schema_pattern, database) %}\n {% else %}\n {% set schemata=[schema_pattern] %}\n {% endif %}\n\n {% set sql %}\n {% for schema in schemata %}\n select distinct\n table_schema,\n table_name,\n {{ dbt_utils.get_table_types_sql() }}\n\n from {{ adapter.quote(database) }}.{{ schema }}.INFORMATION_SCHEMA.TABLES\n where lower(table_name) like lower ('{{ table_pattern }}')\n and lower(table_name) not like lower ('{{ exclude }}')\n\n {% if not loop.last %} union all {% endif %}\n\n {% endfor %}\n {% endset %}\n\n {{ return(sql) }}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_utils._bigquery__get_matching_schemata", + "macro.dbt_utils.get_table_types_sql" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.438118, + "supported_languages": null + }, + "macro.dbt_utils._bigquery__get_matching_schemata": { + "name": "_bigquery__get_matching_schemata", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_tables_by_pattern_sql.sql", + "original_file_path": "macros/sql/get_tables_by_pattern_sql.sql", + "unique_id": "macro.dbt_utils._bigquery__get_matching_schemata", + "macro_sql": "{% macro _bigquery__get_matching_schemata(schema_pattern, database) %}\n {% if execute %}\n\n {% set sql %}\n select schema_name from {{ adapter.quote(database) }}.INFORMATION_SCHEMA.SCHEMATA\n where lower(schema_name) like lower('{{ schema_pattern }}')\n {% endset %}\n\n {% set results=run_query(sql) %}\n\n {% set schemata=results.columns['schema_name'].values() %}\n\n {{ return(schemata) }}\n\n {% else %}\n\n {{ return([]) }}\n\n {% endif %}\n\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4388108, + "supported_languages": null + }, + "macro.dbt_utils.get_column_values": { + "name": "get_column_values", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_column_values.sql", + "original_file_path": "macros/sql/get_column_values.sql", + "unique_id": "macro.dbt_utils.get_column_values", + "macro_sql": "{% macro get_column_values(table, column, order_by='count(*) desc', max_records=none, default=none, where=none) -%}\n {{ return(adapter.dispatch('get_column_values', 'dbt_utils')(table, column, order_by, max_records, default, where)) }}\n{% endmacro %}", + "depends_on": { + 
"macros": ["macro.dbt_utils.default__get_column_values"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.440032, + "supported_languages": null + }, + "macro.dbt_utils.default__get_column_values": { + "name": "default__get_column_values", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_column_values.sql", + "original_file_path": "macros/sql/get_column_values.sql", + "unique_id": "macro.dbt_utils.default__get_column_values", + "macro_sql": "{% macro default__get_column_values(table, column, order_by='count(*) desc', max_records=none, default=none, where=none) -%}\n {#-- Prevent querying of db in parsing mode. This works because this macro does not create any new refs. #}\n {%- if not execute -%}\n {% set default = [] if not default %}\n {{ return(default) }}\n {% endif %}\n\n {%- do dbt_utils._is_ephemeral(table, 'get_column_values') -%}\n\n {# Not all relations are tables. Renaming for internal clarity without breaking functionality for anyone using named arguments #}\n {# TODO: Change the method signature in a future 0.x.0 release #}\n {%- set target_relation = table -%}\n\n {# adapter.load_relation is a convenience wrapper to avoid building a Relation when we already have one #}\n {% set relation_exists = (load_relation(target_relation)) is not none %}\n\n {%- call statement('get_column_values', fetch_result=true) %}\n\n {%- if not relation_exists and default is none -%}\n\n {{ exceptions.raise_compiler_error(\"In get_column_values(): relation \" ~ target_relation ~ \" does not exist and no default value was provided.\") }}\n\n {%- elif not relation_exists and default is not none -%}\n\n {{ log(\"Relation \" ~ target_relation ~ \" does not exist. 
Returning the default value: \" ~ default) }}\n\n {{ return(default) }}\n\n {%- else -%}\n\n\n select\n {{ column }} as value\n\n from {{ target_relation }}\n\n {% if where is not none %}\n where {{ where }}\n {% endif %}\n\n group by {{ column }}\n order by {{ order_by }}\n\n {% if max_records is not none %}\n limit {{ max_records }}\n {% endif %}\n\n {% endif %}\n\n {%- endcall -%}\n\n {%- set value_list = load_result('get_column_values') -%}\n\n {%- if value_list and value_list['data'] -%}\n {%- set values = value_list['data'] | map(attribute=0) | list %}\n {{ return(values) }}\n {%- else -%}\n {{ return(default) }}\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_utils._is_ephemeral", + "macro.dbt.load_relation", + "macro.dbt.statement" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.441704, + "supported_languages": null + }, + "macro.dbt_utils.pivot": { + "name": "pivot", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/pivot.sql", + "original_file_path": "macros/sql/pivot.sql", + "unique_id": "macro.dbt_utils.pivot", + "macro_sql": "{% macro pivot(column,\n values,\n alias=True,\n agg='sum',\n cmp='=',\n prefix='',\n suffix='',\n then_value=1,\n else_value=0,\n quote_identifiers=True,\n distinct=False) %}\n {{ return(adapter.dispatch('pivot', 'dbt_utils')(column, values, alias, agg, cmp, prefix, suffix, then_value, else_value, quote_identifiers, distinct)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__pivot"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.443048, + "supported_languages": null + }, + "macro.dbt_utils.default__pivot": { + "name": "default__pivot", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/pivot.sql", + "original_file_path": "macros/sql/pivot.sql", + "unique_id": "macro.dbt_utils.default__pivot", + "macro_sql": "{% macro default__pivot(column,\n values,\n alias=True,\n agg='sum',\n cmp='=',\n prefix='',\n suffix='',\n then_value=1,\n else_value=0,\n quote_identifiers=True,\n distinct=False) %}\n {% for value in values %}\n {{ agg }}(\n {% if distinct %} distinct {% endif %}\n case\n when {{ column }} {{ cmp }} '{{ dbt.escape_single_quotes(value) }}'\n then {{ then_value }}\n else {{ else_value }}\n end\n )\n {% if alias %}\n {% if quote_identifiers %}\n as {{ adapter.quote(prefix ~ value ~ suffix) }}\n {% else %}\n as {{ dbt_utils.slugify(prefix ~ value ~ suffix) }}\n {% endif %}\n {% endif %}\n {% if not loop.last %},{% endif %}\n {% endfor %}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.escape_single_quotes", "macro.dbt_utils.slugify"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.44401, + "supported_languages": null + }, + "macro.dbt_utils.get_filtered_columns_in_relation": { + "name": "get_filtered_columns_in_relation", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_filtered_columns_in_relation.sql", + "original_file_path": "macros/sql/get_filtered_columns_in_relation.sql", + "unique_id": "macro.dbt_utils.get_filtered_columns_in_relation", + "macro_sql": "{% macro get_filtered_columns_in_relation(from, except=[]) -%}\n {{ 
return(adapter.dispatch('get_filtered_columns_in_relation', 'dbt_utils')(from, except)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__get_filtered_columns_in_relation"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.444524, + "supported_languages": null + }, + "macro.dbt_utils.default__get_filtered_columns_in_relation": { + "name": "default__get_filtered_columns_in_relation", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_filtered_columns_in_relation.sql", + "original_file_path": "macros/sql/get_filtered_columns_in_relation.sql", + "unique_id": "macro.dbt_utils.default__get_filtered_columns_in_relation", + "macro_sql": "{% macro default__get_filtered_columns_in_relation(from, except=[]) -%}\n {%- do dbt_utils._is_relation(from, 'get_filtered_columns_in_relation') -%}\n {%- do dbt_utils._is_ephemeral(from, 'get_filtered_columns_in_relation') -%}\n\n {# -- Prevent querying of db in parsing mode. This works because this macro does not create any new refs. #}\n {%- if not execute -%}\n {{ return('') }}\n {% endif %}\n\n {%- set include_cols = [] %}\n {%- set cols = adapter.get_columns_in_relation(from) -%}\n {%- set except = except | map(\"lower\") | list %}\n {%- for col in cols -%}\n {%- if col.column|lower not in except -%}\n {% do include_cols.append(col.column) %}\n {%- endif %}\n {%- endfor %}\n\n {{ return(include_cols) }}\n\n{%- endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_utils._is_relation", + "macro.dbt_utils._is_ephemeral" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.445365, + "supported_languages": null + }, + "macro.dbt_utils.width_bucket": { + "name": "width_bucket", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/width_bucket.sql", + "original_file_path": "macros/sql/width_bucket.sql", + "unique_id": "macro.dbt_utils.width_bucket", + "macro_sql": "{% macro width_bucket(expr, min_value, max_value, num_buckets) %}\n {{ return(adapter.dispatch('width_bucket', 'dbt_utils') (expr, min_value, max_value, num_buckets)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.snowflake__width_bucket"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.446222, + "supported_languages": null + }, + "macro.dbt_utils.default__width_bucket": { + "name": "default__width_bucket", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/width_bucket.sql", + "original_file_path": "macros/sql/width_bucket.sql", + "unique_id": "macro.dbt_utils.default__width_bucket", + "macro_sql": "{% macro default__width_bucket(expr, min_value, max_value, num_buckets) -%}\n\n {% set bin_size -%}\n (( {{ max_value }} - {{ min_value }} ) / {{ num_buckets }} )\n {%- endset %}\n (\n -- to break ties when the amount is eaxtly at the bucket egde\n case\n when\n mod(\n {{ dbt.safe_cast(expr, dbt.type_numeric() ) }},\n {{ dbt.safe_cast(bin_size, dbt.type_numeric() ) }}\n ) = 0\n then 1\n else 0\n end\n ) +\n -- Anything over max_value goes the N+1 bucket\n least(\n ceil(\n ({{ expr }} - {{ min_value }})/{{ bin_size }}\n ),\n {{ num_buckets }} + 1\n )\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt.safe_cast", "macro.dbt.type_numeric"] + }, 
+ "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.44674, + "supported_languages": null + }, + "macro.dbt_utils.snowflake__width_bucket": { + "name": "snowflake__width_bucket", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/width_bucket.sql", + "original_file_path": "macros/sql/width_bucket.sql", + "unique_id": "macro.dbt_utils.snowflake__width_bucket", + "macro_sql": "{% macro snowflake__width_bucket(expr, min_value, max_value, num_buckets) %}\n width_bucket({{ expr }}, {{ min_value }}, {{ max_value }}, {{ num_buckets }} )\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4469519, + "supported_languages": null + }, + "macro.dbt_utils.get_query_results_as_dict": { + "name": "get_query_results_as_dict", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_query_results_as_dict.sql", + "original_file_path": "macros/sql/get_query_results_as_dict.sql", + "unique_id": "macro.dbt_utils.get_query_results_as_dict", + "macro_sql": "{% macro get_query_results_as_dict(query) %}\n {{ return(adapter.dispatch('get_query_results_as_dict', 'dbt_utils')(query)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__get_query_results_as_dict"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4473758, + "supported_languages": null + }, + "macro.dbt_utils.default__get_query_results_as_dict": { + "name": "default__get_query_results_as_dict", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_query_results_as_dict.sql", + "original_file_path": "macros/sql/get_query_results_as_dict.sql", + "unique_id": "macro.dbt_utils.default__get_query_results_as_dict", + "macro_sql": "{% macro default__get_query_results_as_dict(query) %}\n\n{# This macro returns a dictionary of the form {column_name: (tuple_of_results)} #}\n\n {%- call statement('get_query_results', fetch_result=True,auto_begin=false) -%}\n\n {{ query }}\n\n {%- endcall -%}\n\n {% set sql_results={} %}\n\n {%- if execute -%}\n {% set sql_results_table = load_result('get_query_results').table.columns %}\n {% for column_name, column in sql_results_table.items() %}\n {% do sql_results.update({column_name: column.values()}) %}\n {% endfor %}\n {%- endif -%}\n\n {{ return(sql_results) }}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.448027, + "supported_languages": null + }, + "macro.dbt_utils.generate_surrogate_key": { + "name": "generate_surrogate_key", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/generate_surrogate_key.sql", + "original_file_path": "macros/sql/generate_surrogate_key.sql", + "unique_id": "macro.dbt_utils.generate_surrogate_key", + "macro_sql": "{%- macro generate_surrogate_key(field_list) -%}\n {{ return(adapter.dispatch('generate_surrogate_key', 'dbt_utils')(field_list)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__generate_surrogate_key"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, 
+ "patch_path": null, + "arguments": [], + "created_at": 1705588676.4485319, + "supported_languages": null + }, + "macro.dbt_utils.default__generate_surrogate_key": { + "name": "default__generate_surrogate_key", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/generate_surrogate_key.sql", + "original_file_path": "macros/sql/generate_surrogate_key.sql", + "unique_id": "macro.dbt_utils.default__generate_surrogate_key", + "macro_sql": "\n\n{%- macro default__generate_surrogate_key(field_list) -%}\n\n{%- if var('surrogate_key_treat_nulls_as_empty_strings', False) -%}\n {%- set default_null_value = \"\" -%}\n{%- else -%}\n {%- set default_null_value = '_dbt_utils_surrogate_key_null_' -%}\n{%- endif -%}\n\n{%- set fields = [] -%}\n\n{%- for field in field_list -%}\n\n {%- do fields.append(\n \"coalesce(cast(\" ~ field ~ \" as \" ~ dbt.type_string() ~ \"), '\" ~ default_null_value ~\"')\"\n ) -%}\n\n {%- if not loop.last %}\n {%- do fields.append(\"'-'\") -%}\n {%- endif -%}\n\n{%- endfor -%}\n\n{{ dbt.hash(dbt.concat(fields)) }}\n\n{%- endmacro -%}", + "depends_on": { + "macros": [ + "macro.dbt.type_string", + "macro.dbt.hash", + "macro.dbt.concat" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4492009, + "supported_languages": null + }, + "macro.dbt_utils.get_table_types_sql": { + "name": "get_table_types_sql", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_table_types_sql.sql", + "original_file_path": "macros/sql/get_table_types_sql.sql", + "unique_id": "macro.dbt_utils.get_table_types_sql", + "macro_sql": "{%- macro get_table_types_sql() -%}\n {{ return(adapter.dispatch('get_table_types_sql', 'dbt_utils')()) }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": ["macro.dbt_utils.default__get_table_types_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.449762, + "supported_languages": null + }, + "macro.dbt_utils.default__get_table_types_sql": { + "name": "default__get_table_types_sql", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_table_types_sql.sql", + "original_file_path": "macros/sql/get_table_types_sql.sql", + "unique_id": "macro.dbt_utils.default__get_table_types_sql", + "macro_sql": "{% macro default__get_table_types_sql() %}\n case table_type\n when 'BASE TABLE' then 'table'\n when 'EXTERNAL TABLE' then 'external'\n when 'MATERIALIZED VIEW' then 'materializedview'\n else lower(table_type)\n end as {{ adapter.quote('table_type') }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.449916, + "supported_languages": null + }, + "macro.dbt_utils.postgres__get_table_types_sql": { + "name": "postgres__get_table_types_sql", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_table_types_sql.sql", + "original_file_path": "macros/sql/get_table_types_sql.sql", + "unique_id": "macro.dbt_utils.postgres__get_table_types_sql", + "macro_sql": "{% macro postgres__get_table_types_sql() %}\n case table_type\n when 'BASE TABLE' then 'table'\n when 'FOREIGN' then 'external'\n when 'MATERIALIZED VIEW' then 'materializedview'\n else lower(table_type)\n end as {{ adapter.quote('table_type') 
}}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.450067, + "supported_languages": null + }, + "macro.dbt_utils.databricks__get_table_types_sql": { + "name": "databricks__get_table_types_sql", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_table_types_sql.sql", + "original_file_path": "macros/sql/get_table_types_sql.sql", + "unique_id": "macro.dbt_utils.databricks__get_table_types_sql", + "macro_sql": "{% macro databricks__get_table_types_sql() %}\n case table_type\n when 'MANAGED' then 'table'\n when 'BASE TABLE' then 'table'\n when 'MATERIALIZED VIEW' then 'materializedview'\n else lower(table_type)\n end as {{ adapter.quote('table_type') }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.450211, + "supported_languages": null + }, + "macro.dbt_utils.get_single_value": { + "name": "get_single_value", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_single_value.sql", + "original_file_path": "macros/sql/get_single_value.sql", + "unique_id": "macro.dbt_utils.get_single_value", + "macro_sql": "{% macro get_single_value(query, default=none) %}\n {{ return(adapter.dispatch('get_single_value', 'dbt_utils')(query, default)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.default__get_single_value"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4507892, + "supported_languages": null + }, + "macro.dbt_utils.default__get_single_value": { + "name": "default__get_single_value", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/get_single_value.sql", + "original_file_path": "macros/sql/get_single_value.sql", + "unique_id": "macro.dbt_utils.default__get_single_value", + "macro_sql": "{% macro default__get_single_value(query, default) %}\n\n{# This macro returns the (0, 0) record in a query, i.e. the first row of the first column #}\n\n {%- call statement('get_query_result', fetch_result=True, auto_begin=false) -%}\n\n {{ query }}\n\n {%- endcall -%}\n\n {%- if execute -%}\n\n {% set r = load_result('get_query_result').table.columns[0].values() %}\n {% if r | length == 0 %}\n {% do print('Query `' ~ query ~ '` returned no rows. 
Using the default value: ' ~ default) %}\n {% set sql_result = default %}\n {% else %}\n {% set sql_result = r[0] %}\n {% endif %}\n \n {%- else -%}\n \n {% set sql_result = default %}\n \n {%- endif -%}\n\n {% do return(sql_result) %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.451713, + "supported_languages": null + }, + "macro.dbt_utils.degrees_to_radians": { + "name": "degrees_to_radians", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/haversine_distance.sql", + "original_file_path": "macros/sql/haversine_distance.sql", + "unique_id": "macro.dbt_utils.degrees_to_radians", + "macro_sql": "{% macro degrees_to_radians(degrees) -%}\n acos(-1) * {{degrees}} / 180\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.45299, + "supported_languages": null + }, + "macro.dbt_utils.haversine_distance": { + "name": "haversine_distance", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/haversine_distance.sql", + "original_file_path": "macros/sql/haversine_distance.sql", + "unique_id": "macro.dbt_utils.haversine_distance", + "macro_sql": "{% macro haversine_distance(lat1, lon1, lat2, lon2, unit='mi') -%}\n {{ return(adapter.dispatch('haversine_distance', 'dbt_utils')(lat1,lon1,lat2,lon2,unit)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_utils.default__haversine_distance"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.453314, + "supported_languages": null + }, + "macro.dbt_utils.default__haversine_distance": { + "name": "default__haversine_distance", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/haversine_distance.sql", + "original_file_path": "macros/sql/haversine_distance.sql", + "unique_id": "macro.dbt_utils.default__haversine_distance", + "macro_sql": "{% macro default__haversine_distance(lat1, lon1, lat2, lon2, unit='mi') -%}\n{%- if unit == 'mi' %}\n {% set conversion_rate = 1 %}\n{% elif unit == 'km' %}\n {% set conversion_rate = 1.60934 %}\n{% else %}\n {{ exceptions.raise_compiler_error(\"unit input must be one of 'mi' or 'km'. 
Got \" ~ unit) }}\n{% endif %}\n\n 2 * 3961 * asin(sqrt(power((sin(radians(({{ lat2 }} - {{ lat1 }}) / 2))), 2) +\n cos(radians({{lat1}})) * cos(radians({{lat2}})) *\n power((sin(radians(({{ lon2 }} - {{ lon1 }}) / 2))), 2))) * {{ conversion_rate }}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.453969, + "supported_languages": null + }, + "macro.dbt_utils.bigquery__haversine_distance": { + "name": "bigquery__haversine_distance", + "resource_type": "macro", + "package_name": "dbt_utils", + "path": "macros/sql/haversine_distance.sql", + "original_file_path": "macros/sql/haversine_distance.sql", + "unique_id": "macro.dbt_utils.bigquery__haversine_distance", + "macro_sql": "{% macro bigquery__haversine_distance(lat1, lon1, lat2, lon2, unit='mi') -%}\n{% set radians_lat1 = dbt_utils.degrees_to_radians(lat1) %}\n{% set radians_lat2 = dbt_utils.degrees_to_radians(lat2) %}\n{% set radians_lon1 = dbt_utils.degrees_to_radians(lon1) %}\n{% set radians_lon2 = dbt_utils.degrees_to_radians(lon2) %}\n{%- if unit == 'mi' %}\n {% set conversion_rate = 1 %}\n{% elif unit == 'km' %}\n {% set conversion_rate = 1.60934 %}\n{% else %}\n {{ exceptions.raise_compiler_error(\"unit input must be one of 'mi' or 'km'. Got \" ~ unit) }}\n{% endif %}\n 2 * 3961 * asin(sqrt(power(sin(({{ radians_lat2 }} - {{ radians_lat1 }}) / 2), 2) +\n cos({{ radians_lat1 }}) * cos({{ radians_lat2 }}) *\n power(sin(({{ radians_lon2 }} - {{ radians_lon1 }}) / 2), 2))) * {{ conversion_rate }}\n\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_utils.degrees_to_radians"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4548411, + "supported_languages": null + }, + "macro.dbt_snow_mask.create_masking_policy_mp_encrypt_pii": { + "name": "create_masking_policy_mp_encrypt_pii", + "resource_type": "macro", + "package_name": "dbt_snow_mask", + "path": "macros/snow-mask-ddl/create_masking_policy_mp_encrypt_pii.sql", + "original_file_path": "macros/snow-mask-ddl/create_masking_policy_mp_encrypt_pii.sql", + "unique_id": "macro.dbt_snow_mask.create_masking_policy_mp_encrypt_pii", + "macro_sql": "{% macro create_masking_policy_mp_encrypt_pii(node_database,node_schema) %}\n\n CREATE MASKING POLICY IF NOT EXISTS {{node_database}}.{{node_schema}}.mp_encrypt_pii AS (val string) \n\n RETURNS string ->\n CASE WHEN CURRENT_ROLE() IN ('ANALYST') THEN val \n WHEN CURRENT_ROLE() IN ('SYSADMIN') THEN SHA2(val)\n ELSE '**********'\n END\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.455123, + "supported_languages": null + }, + "macro.dbt_snow_mask.create_masking_policy_mp_conditional_pii": { + "name": "create_masking_policy_mp_conditional_pii", + "resource_type": "macro", + "package_name": "dbt_snow_mask", + "path": "macros/snow-mask-ddl/create_masking_policy_mp_conditional_pii.sql", + "original_file_path": "macros/snow-mask-ddl/create_masking_policy_mp_conditional_pii.sql", + "unique_id": "macro.dbt_snow_mask.create_masking_policy_mp_conditional_pii", + "macro_sql": "{% macro create_masking_policy_mp_conditional_pii(node_database, node_schema, masked_column) %}\n\n CREATE MASKING POLICY IF NOT EXISTS 
{{node_database}}.{{node_schema}}.mp_conditional_pii AS (\n {{masked_column}} string,\n my_conditional_col_1 string,\n my_conditional_col_2 string\n ) RETURNS string ->\n CASE \n WHEN CURRENT_ROLE() IN ('ANALYST') AND my_conditional_col_1='foo' THEN {{masked_column}}\n WHEN CURRENT_ROLE() IN ('ANALYST') AND my_conditional_col_2='bar' THEN SHA2({{masked_column}})\n WHEN CURRENT_ROLE() IN ('SYSADMIN') THEN SHA2({{masked_column}})\n ELSE '**********'\n END\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.455591, + "supported_languages": null + }, + "macro.dbt_snow_mask.get_meta_objects": { + "name": "get_meta_objects", + "resource_type": "macro", + "package_name": "dbt_snow_mask", + "path": "macros/snow-mask/get_meta_objects.sql", + "original_file_path": "macros/snow-mask/get_meta_objects.sql", + "unique_id": "macro.dbt_snow_mask.get_meta_objects", + "macro_sql": "{% macro get_meta_objects(node_unique_id, meta_key,node_resource_type=\"model\") %}\n\t{% if execute %}\n\n {% set meta_columns = [] %}\n {% if node_resource_type == \"source\" %} \n {% set columns = graph.sources[node_unique_id]['columns'] %}\n {% else %}\n {% set columns = graph.nodes[node_unique_id]['columns'] %}\n {% endif %}\n \n {% if meta_key is not none %}\n {% if node_resource_type == \"source\" %} \n {% for column in columns if graph.sources[node_unique_id]['columns'][column]['meta'][meta_key] | length > 0 %}\n {% set meta_dict = graph.sources[node_unique_id]['columns'][column]['meta'] %}\n {% if meta_key in meta_dict %}\n {% set policy_name = meta_dict[meta_key] %}\n {% if \"mp_conditional_columns\" in meta_dict %}\n {% set conditional_columns = meta_dict['mp_conditional_columns'] %}\n {% else %}\n {% set conditional_columns = [] %}\n {% endif %}\n {% set meta_tuple = (column, policy_name, conditional_columns) %}\n {% do meta_columns.append(meta_tuple) %}\n {% endif %}\n {% endfor %}\n {% else %}\n {% for column in columns if graph.nodes[node_unique_id]['columns'][column]['meta'][meta_key] | length > 0 %}\n {% set meta_dict = graph.nodes[node_unique_id]['columns'][column]['meta'] %}\n {% if meta_key in meta_dict %}\n {% set policy_name = meta_dict[meta_key] %}\n {% if \"mp_conditional_columns\" in meta_dict %}\n {% set conditional_columns = meta_dict['mp_conditional_columns'] %}\n {% else %}\n {% set conditional_columns = [] %}\n {% endif %}\n {% set meta_tuple = (column, policy_name, conditional_columns) %}\n {% do meta_columns.append(meta_tuple) %}\n {% endif %}\n {% endfor %}\n {% endif %}\n {% else %}\n {% do meta_columns.append(column|upper) %}\n {% endif %}\n\n {{ return(meta_columns) }}\n\n {% endif %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4608, + "supported_languages": null + }, + "macro.dbt_snow_mask.apply_masking_policy_list_for_models": { + "name": "apply_masking_policy_list_for_models", + "resource_type": "macro", + "package_name": "dbt_snow_mask", + "path": "macros/snow-mask/apply-policy/apply_masking_policy_list_for_models.sql", + "original_file_path": "macros/snow-mask/apply-policy/apply_masking_policy_list_for_models.sql", + "unique_id": "macro.dbt_snow_mask.apply_masking_policy_list_for_models", + "macro_sql": "{% macro apply_masking_policy_list_for_models(meta_key,operation_type=\"apply\") 
%}\n\n{% if execute %}\n\n {% if operation_type == \"apply\" %}\n \n {% set model_id = model.unique_id | string %}\n {% set alias = model.alias %} \n {% set database = model.database %}\n {% set schema = model.schema %}\n {% set model_resource_type = model.resource_type | string %}\n\n {% if model_resource_type|lower in [\"model\", \"snapshot\"] %}\n\n {# This dictionary stores a mapping between materializations in dbt and the objects they will generate in Snowflake #}\n {% set materialization_map = {\"table\": \"table\", \"view\": \"view\", \"incremental\": \"table\", \"snapshot\": \"table\"} %}\n\n {# Append custom materializations to the list of standard materializations #}\n {% do materialization_map.update(fromjson(var('custom_materializations_map', '{}'))) %}\n\n {% set materialization = materialization_map[model.config.get(\"materialized\")] %}\n {% set meta_columns = dbt_snow_mask.get_meta_objects(model_id,meta_key) %}\n\n {% set masking_policy_db = model.database %}\n {% set masking_policy_schema = model.schema %}\n \n {# Override the database and schema name when use_common_masking_policy_db flag is set #}\n {%- if (var('use_common_masking_policy_db', 'False')|upper in ['TRUE','YES']) -%}\n {% if (var('common_masking_policy_db') and var('common_masking_policy_schema')) %}\n {% set masking_policy_db = var('common_masking_policy_db') | string %}\n {% set masking_policy_schema = var('common_masking_policy_schema') | string %}\n {% endif %}\n {% endif %}\n\n {# Override the schema name (in the masking_policy_db) when use_common_masking_policy_schema_only flag is set #}\n {%- if (var('use_common_masking_policy_schema_only', 'False')|upper in ['TRUE','YES']) and (var('use_common_masking_policy_db', 'False')|upper in ['FALSE','NO']) -%}\n {% if var('common_masking_policy_schema') %}\n {% set masking_policy_schema = var('common_masking_policy_schema') | string %}\n {% endif %}\n {% endif %}\n\n {% set masking_policy_list_sql %} \n show masking policies in {{masking_policy_db}}.{{masking_policy_schema}};\n select $3||'.'||$4||'.'||$2 as masking_policy from table(result_scan(last_query_id()));\n {% endset %}\n\n {# If there are some masking policies to be applied in this model, we should show the masking policies in the schema #}\n {% if meta_columns | length > 0 %}\n {% set masking_policy_list = dbt_utils.get_query_results_as_dict(masking_policy_list_sql) %}\n {% endif %}\n\n {%- for meta_tuple in meta_columns if meta_columns | length > 0 %}\n {% set column = meta_tuple[0] %}\n {% set masking_policy_name = meta_tuple[1] %}\n {% set conditional_columns = meta_tuple[2] %}\n \n {% if masking_policy_name is not none %}\n\n {% for masking_policy_in_db in masking_policy_list['MASKING_POLICY'] %}\n {% if masking_policy_db|upper ~ '.' ~ masking_policy_schema|upper ~ '.' ~ masking_policy_name|upper == masking_policy_in_db %}\n {{ log(modules.datetime.datetime.now().strftime(\"%H:%M:%S\") ~ \" | \" ~ operation_type ~ \"ing masking policy to model : \" ~ masking_policy_db|upper ~ '.' ~ masking_policy_schema|upper ~ '.' ~ masking_policy_name|upper ~ \" on \" ~ database ~ '.' ~ schema ~ '.' ~ alias ~ '.' 
~ column ~ ' [force = ' ~ var('use_force_applying_masking_policy','False') ~ ']', info=True) }}\n {% set query %}\n alter {{materialization}} {{database}}.{{schema}}.{{alias}}\n modify column {{column}}\n set masking policy {{masking_policy_db}}.{{masking_policy_schema}}.{{masking_policy_name}} {% if conditional_columns | length > 0 %}using ({{column}}, {{conditional_columns|join(', ')}}){% endif %} {% if var('use_force_applying_masking_policy','False')|upper in ['TRUE','YES'] %} force {% endif %};\n {% endset %}\n {% do run_query(query) %}\n {% endif %}\n {% endfor %}\n\n {% endif %}\n {% endfor %}\n\n {% endif %}\n \n {% elif operation_type == \"unapply\" %}\n\n {% for node in graph.nodes.values() -%}\n\n {% set database = node.database | string %}\n {% set schema = node.schema | string %}\n {% set node_unique_id = node.unique_id | string %}\n {% set node_resource_type = node.resource_type | string %}\n {% set materialization_map = {\"table\": \"table\", \"view\": \"view\", \"incremental\": \"table\", \"snapshot\": \"table\"} %}\n\n {% if node_resource_type|lower in [\"model\", \"snapshot\"] %}\n\n {# Append custom materializations to the list of standard materializations #}\n {% do materialization_map.update(fromjson(var('custom_materializations_map', '{}'))) %}\n\n {% set materialization = materialization_map[node.config.get(\"materialized\")] %}\n {% set alias = node.alias %}\n\n {% set meta_columns = dbt_snow_mask.get_meta_objects(node_unique_id,meta_key,node_resource_type) %}\n\n {%- for meta_tuple in meta_columns if meta_columns | length > 0 %}\n {% set column = meta_tuple[0] %}\n {% set masking_policy_name = meta_tuple[1] %}\n\n {% if masking_policy_name is not none %}\n {{ log(modules.datetime.datetime.now().strftime(\"%H:%M:%S\") ~ \" | \" ~ operation_type ~ \"ing masking policy to model : \" ~ database|upper ~ '.' ~ schema|upper ~ '.' ~ masking_policy_name|upper ~ \" on \" ~ database ~ '.' ~ schema ~ '.' ~ alias ~ '.' 
~ column, info=True) }}\n {% set query %}\n alter {{materialization}} {{database}}.{{schema}}.{{alias}} modify column {{column}} unset masking policy\n {% endset %}\n {% do run_query(query) %}\n {% endif %}\n \n {% endfor %}\n\n {% endif %}\n\n {% endfor %}\n\n {% endif %}\n\n{% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snow_mask.get_meta_objects", + "macro.dbt_utils.get_query_results_as_dict", + "macro.dbt.run_query" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4702542, + "supported_languages": null + }, + "macro.dbt_snow_mask.apply_masking_policy_list_for_sources": { + "name": "apply_masking_policy_list_for_sources", + "resource_type": "macro", + "package_name": "dbt_snow_mask", + "path": "macros/snow-mask/apply-policy/apply_masking_policy_list_for_sources.sql", + "original_file_path": "macros/snow-mask/apply-policy/apply_masking_policy_list_for_sources.sql", + "unique_id": "macro.dbt_snow_mask.apply_masking_policy_list_for_sources", + "macro_sql": "{% macro apply_masking_policy_list_for_sources(meta_key,operation_type=\"apply\") %}\n\n{% if execute %}\n\n {% for node in graph.sources.values() -%}\n\n {% set database = node.database | string %}\n {% set schema = node.schema | string %}\n {% set name = node.name | string %}\n {% set identifier = (node.identifier | default(name, True)) | string %}\n\n {% set unique_id = node.unique_id | string %}\n {% set resource_type = node.resource_type | string %}\n {% set materialization = \"table\" %}\n\n {% set relation = adapter.get_relation(database=database, schema=schema, identifier=identifier) %}\n {% if relation.is_view %}\n {% set materialization = \"view\" %}\n {% endif %}\n\n {% set meta_columns = dbt_snow_mask.get_meta_objects(unique_id,meta_key,resource_type) %}\n\n {# Use the database and schema for the source node: #}\n {# In the apple for models variant of this file it instead uses the model.database/schema metadata #}\n {% set masking_policy_db = node.database %}\n {% set masking_policy_schema = node.schema %}\n\t\t\n {# Override the database and schema name when use_common_masking_policy_db flag is set #}\n {%- if (var('use_common_masking_policy_db', 'False')|upper in ['TRUE','YES']) -%}\n {% if (var('common_masking_policy_db') and var('common_masking_policy_schema')) %}\n {% set masking_policy_db = var('common_masking_policy_db') | string %}\n {% set masking_policy_schema = var('common_masking_policy_schema') | string %}\n {% endif %}\n {% endif %}\n\n {# Override the schema name (in the masking_policy_db) when use_common_masking_policy_schema_only flag is set #}\n {%- if (var('use_common_masking_policy_schema_only', 'False')|upper in ['TRUE','YES']) and (var('use_common_masking_policy_db', 'False')|upper in ['FALSE','NO']) -%}\n {% if var('common_masking_policy_schema') %}\n {% set masking_policy_schema = var('common_masking_policy_schema') | string %}\n {% endif %}\n {% endif %}\n\n {% set masking_policy_list_sql %}\n show masking policies in {{masking_policy_db}}.{{masking_policy_schema}};\n select $3||'.'||$4||'.'||$2 as masking_policy from table(result_scan(last_query_id()));\n {% endset %}\n\n {# If there are some masking policies to be applied in this model, we should show the masking policies in the schema #}\n {% if meta_columns | length > 0 %}\n {% set masking_policy_list = dbt_utils.get_query_results_as_dict(masking_policy_list_sql) %}\n {% endif %}\n\n {%- for meta_tuple in 
meta_columns if meta_columns | length > 0 %}\n {% set column = meta_tuple[0] %}\n {% set masking_policy_name = meta_tuple[1] %}\n {% set conditional_columns = meta_tuple[2] %}\n\n {% if masking_policy_name is not none %}\n\n {% for masking_policy_in_db in masking_policy_list['MASKING_POLICY'] %}\n {% if masking_policy_db|upper ~ '.' ~ masking_policy_schema|upper ~ '.' ~ masking_policy_name|upper == masking_policy_in_db %}\n {{ log(modules.datetime.datetime.now().strftime(\"%H:%M:%S\") ~ \" | \" ~ operation_type ~ \"ing masking policy to source : \" ~ masking_policy_db|upper ~ '.' ~ masking_policy_schema|upper ~ '.' ~ masking_policy_name|upper ~ \" on \" ~ database ~ '.' ~ schema ~ '.' ~ identifier ~ '.' ~ column ~ ' [force = ' ~ var('use_force_applying_masking_policy','False') ~ ']', info=True) }}\n {% set query %}\n {% if operation_type == \"apply\" %}\n alter {{materialization}} {{database}}.{{schema}}.{{identifier}}\n modify column {{column}}\n set masking policy {{masking_policy_db}}.{{masking_policy_schema}}.{{masking_policy_name}} {% if conditional_columns | length > 0 %}using ({{column}}, {{conditional_columns|join(', ')}}){% endif %} {% if var('use_force_applying_masking_policy','False')|upper in ['TRUE','YES'] %} force {% endif %}\n {% elif operation_type == \"unapply\" %}\n alter {{materialization}} {{database}}.{{schema}}.{{identifier}} modify column {{column}} unset masking policy\n {% endif %}\n {% endset %}\n {% do run_query(query) %}\n {% endif %}\n {% endfor %}\n {% endif %}\n\n {% endfor %}\n\n {% endfor %}\n\n{% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snow_mask.get_meta_objects", + "macro.dbt_utils.get_query_results_as_dict", + "macro.dbt.run_query" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.477505, + "supported_languages": null + }, + "macro.dbt_snow_mask.unapply_masking_policy": { + "name": "unapply_masking_policy", + "resource_type": "macro", + "package_name": "dbt_snow_mask", + "path": "macros/snow-mask/apply-policy/unapply_masking_policy.sql", + "original_file_path": "macros/snow-mask/apply-policy/unapply_masking_policy.sql", + "unique_id": "macro.dbt_snow_mask.unapply_masking_policy", + "macro_sql": "{% macro unapply_masking_policy(resource_type=\"models\",meta_key=\"masking_policy\",operation_type=\"unapply\") %}\n\n {% if execute %}\n\n {% if resource_type == \"sources\" %}\n {{ dbt_snow_mask.apply_masking_policy_list_for_sources(meta_key,operation_type) }}\n {% elif resource_type|lower in [\"models\", \"snapshots\"] %}\n {{ dbt_snow_mask.apply_masking_policy_list_for_models(meta_key,operation_type) }}\n {% endif %}\n\n {% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snow_mask.apply_masking_policy_list_for_sources", + "macro.dbt_snow_mask.apply_masking_policy_list_for_models" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.478204, + "supported_languages": null + }, + "macro.dbt_snow_mask.apply_masking_policy": { + "name": "apply_masking_policy", + "resource_type": "macro", + "package_name": "dbt_snow_mask", + "path": "macros/snow-mask/apply-policy/apply_masking_policy.sql", + "original_file_path": "macros/snow-mask/apply-policy/apply_masking_policy.sql", + "unique_id": "macro.dbt_snow_mask.apply_masking_policy", + "macro_sql": "{% macro 
apply_masking_policy(resource_type=\"models\",meta_key=\"masking_policy\") %}\n\n {% if execute %}\n\n {% if resource_type == \"sources\" %}\n {{ dbt_snow_mask.apply_masking_policy_list_for_sources(meta_key) }}\n {% elif resource_type|lower in [\"models\", \"snapshots\"] %}\n {{ dbt_snow_mask.apply_masking_policy_list_for_models(meta_key) }}\n {% endif %}\n\n {% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snow_mask.apply_masking_policy_list_for_sources", + "macro.dbt_snow_mask.apply_masking_policy_list_for_models" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4788322, + "supported_languages": null + }, + "macro.dbt_snow_mask.get_masking_policy_list_for_models": { + "name": "get_masking_policy_list_for_models", + "resource_type": "macro", + "package_name": "dbt_snow_mask", + "path": "macros/snow-mask/create-policy/get_masking_policy_list_for_models.sql", + "original_file_path": "macros/snow-mask/create-policy/get_masking_policy_list_for_models.sql", + "unique_id": "macro.dbt_snow_mask.get_masking_policy_list_for_models", + "macro_sql": "{% macro get_masking_policy_list_for_models(meta_key) %}\n\n {% set masking_policies = [] %}\n\n {% for node in graph.nodes.values() -%}\n\n {{ log(modules.datetime.datetime.now().strftime(\"%H:%M:%S\") ~ \" | macro - now processing : \" ~ node.unique_id | string , info=False) }}\n \n {% set node_database = node.database | string %}\n {% set node_schema = node.schema | string %}\n {% set node_unique_id = node.unique_id | string %}\n {% set node_resource_type = node.resource_type | string %}\n\n {% set meta_columns = dbt_snow_mask.get_meta_objects(node_unique_id,meta_key,node_resource_type) %}\n \n {%- for meta_tuple in meta_columns if meta_columns | length > 0 %}\n {{ log(modules.datetime.datetime.now().strftime(\"%H:%M:%S\") ~ \" | macro - meta_columns : \" ~ node_unique_id ~ \" has \" ~ meta_columns | string ~ \" masking tags set\", info=False) }}\n\n {% set column = meta_tuple[0] %}\n {% set masking_policy_name = meta_tuple[1] %}\n {% set conditional_columns = meta_tuple[2] %}\n {% if conditional_columns | length > 0 %}\n {% set conditionally_masked_column = column %}\n {% else %}\n {% set conditionally_masked_column = none %}\n {% endif %}\n {% if masking_policy_name is not none %}\n {% set masking_policy_tuple = (node_database, node_schema, masking_policy_name, conditionally_masked_column) %}\n {% do masking_policies.append(masking_policy_tuple) %}\n {% endif %}\n\n {% endfor %}\n \n {% endfor %}\n\n {{ return(masking_policies) }}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_snow_mask.get_meta_objects"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.481529, + "supported_languages": null + }, + "macro.dbt_snow_mask.get_masking_policy_list_for_sources": { + "name": "get_masking_policy_list_for_sources", + "resource_type": "macro", + "package_name": "dbt_snow_mask", + "path": "macros/snow-mask/create-policy/get_masking_policy_list_for_sources.sql", + "original_file_path": "macros/snow-mask/create-policy/get_masking_policy_list_for_sources.sql", + "unique_id": "macro.dbt_snow_mask.get_masking_policy_list_for_sources", + "macro_sql": "{% macro get_masking_policy_list_for_sources(meta_key) %}\n\n {% set masking_policies = [] %}\n\n {% for node in graph.sources.values() -%}\n\n {{ 
log(modules.datetime.datetime.now().strftime(\"%H:%M:%S\") ~ \" | macro - now processing : \" ~ node.unique_id | string , info=False) }}\n \n {% set node_database = node.database | string %}\n {% set node_schema = node.schema | string %}\n {% set node_unique_id = node.unique_id | string %}\n {% set node_resource_type = node.resource_type | string %}\n\n {% set meta_columns = dbt_snow_mask.get_meta_objects(node_unique_id,meta_key,node_resource_type) %}\n \n {%- for meta_tuple in meta_columns if meta_columns | length > 0 %}\n {{ log(modules.datetime.datetime.now().strftime(\"%H:%M:%S\") ~ \" | macro - meta_columns : \" ~ node_unique_id ~ \" has \" ~ meta_columns | string ~ \" masking tags set\", info=False) }}\n\n {% set column = meta_tuple[0] %}\n {% set masking_policy_name = meta_tuple[1] %}\n {% set conditional_columns = meta_tuple[2] %}\n {% if conditional_columns | length > 0 %}\n {% set conditionally_masked_column = column %}\n {% else %}\n {% set conditionally_masked_column = none %}\n {% endif %}\n {% if masking_policy_name is not none %}\n {% set masking_policy_tuple = (node_database, node_schema, masking_policy_name, conditionally_masked_column) %}\n {% do masking_policies.append(masking_policy_tuple) %}\n {% endif %}\n\n {% endfor %}\n \n {% endfor %}\n\n {{ return(masking_policies) }}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_snow_mask.get_meta_objects"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.484503, + "supported_languages": null + }, + "macro.dbt_snow_mask.create_masking_policy": { + "name": "create_masking_policy", + "resource_type": "macro", + "package_name": "dbt_snow_mask", + "path": "macros/snow-mask/create-policy/create_masking_policy.sql", + "original_file_path": "macros/snow-mask/create-policy/create_masking_policy.sql", + "unique_id": "macro.dbt_snow_mask.create_masking_policy", + "macro_sql": "{% macro create_masking_policy(resource_type=\"sources\",meta_key=\"masking_policy\") %}\n\n{% if execute %}\n\n {% set masking_policies = [] %}\n\n {% if resource_type == \"sources\" %}\n {% set masking_policies = dbt_snow_mask.get_masking_policy_list_for_sources(meta_key) %}\n {% else %}\n {% set masking_policies = dbt_snow_mask.get_masking_policy_list_for_models(meta_key) %}\n {% endif %}\n\n {% for masking_policy in masking_policies | unique -%}\n\n {% set masking_policy_db = masking_policy[0] | string %}\n {% set masking_policy_schema = masking_policy[1] | string %}\n\n {# Override the database and schema name when use_common_masking_policy_db flag is set #}\n {%- if (var('use_common_masking_policy_db', 'False')|upper in ['TRUE','YES']) -%}\n {% if (var('common_masking_policy_db') and var('common_masking_policy_schema')) %}\n {% set masking_policy_db = var('common_masking_policy_db') | string %}\n {% set masking_policy_schema = var('common_masking_policy_schema') | string %}\n {% endif %}\n {% endif %}\n\n {# Override the schema name (in the masking_policy_db) when use_common_masking_policy_schema_only flag is set #}\n {%- if (var('use_common_masking_policy_schema_only', 'False')|upper in ['TRUE','YES']) and (var('use_common_masking_policy_db', 'False')|upper in ['FALSE','NO']) -%}\n {% if var('common_masking_policy_schema') %}\n {% set masking_policy_schema = var('common_masking_policy_schema') | string %}\n {% endif %}\n {% endif %}\n\n {% set current_policy_name = masking_policy[2] | string %}\n {% set conditionally_masked_column = 
masking_policy[3] %}\n\n {%- if (var('create_masking_policy_schema', 'True')|upper in ['TRUE','YES']) -%}\n {% do adapter.create_schema(api.Relation.create(database=masking_policy_db, schema=masking_policy_schema)) %}\n {% endif %}\n\n {% set call_masking_policy_macro = context[\"create_masking_policy_\" | string ~ current_policy_name | string] %}\n {% if conditionally_masked_column is not none %}\n {% set result = run_query(call_masking_policy_macro(masking_policy_db, masking_policy_schema, conditionally_masked_column)) %}\n {% else %}\n {% set result = run_query(call_masking_policy_macro(masking_policy_db, masking_policy_schema)) %}\n {% endif %}\n {% endfor %}\n\n{% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_snow_mask.get_masking_policy_list_for_sources", + "macro.dbt_snow_mask.get_masking_policy_list_for_models", + "macro.dbt.run_query" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.487481, + "supported_languages": null + }, + "macro.dbt_date.get_date_dimension": { + "name": "get_date_dimension", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/get_date_dimension.sql", + "original_file_path": "macros/get_date_dimension.sql", + "unique_id": "macro.dbt_date.get_date_dimension", + "macro_sql": "{% macro get_date_dimension(start_date, end_date) %}\n {{ adapter.dispatch('get_date_dimension', 'dbt_date') (start_date, end_date) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.default__get_date_dimension"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.4969668, + "supported_languages": null + }, + "macro.dbt_date.default__get_date_dimension": { + "name": "default__get_date_dimension", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/get_date_dimension.sql", + "original_file_path": "macros/get_date_dimension.sql", + "unique_id": "macro.dbt_date.default__get_date_dimension", + "macro_sql": "{% macro default__get_date_dimension(start_date, end_date) %}\nwith base_dates as (\n {{ dbt_date.get_base_dates(start_date, end_date) }}\n),\ndates_with_prior_year_dates as (\n\n select\n cast(d.date_day as date) as date_day,\n cast({{ dbt.dateadd('year', -1 , 'd.date_day') }} as date) as prior_year_date_day,\n cast({{ dbt.dateadd('day', -364 , 'd.date_day') }} as date) as prior_year_over_year_date_day\n from\n \tbase_dates d\n\n)\nselect\n d.date_day,\n {{ dbt_date.yesterday('d.date_day') }} as prior_date_day,\n {{ dbt_date.tomorrow('d.date_day') }} as next_date_day,\n d.prior_year_date_day as prior_year_date_day,\n d.prior_year_over_year_date_day,\n {{ dbt_date.day_of_week('d.date_day', isoweek=false) }} as day_of_week,\n {{ dbt_date.day_of_week('d.date_day', isoweek=true) }} as day_of_week_iso,\n {{ dbt_date.day_name('d.date_day', short=false) }} as day_of_week_name,\n {{ dbt_date.day_name('d.date_day', short=true) }} as day_of_week_name_short,\n {{ dbt_date.day_of_month('d.date_day') }} as day_of_month,\n {{ dbt_date.day_of_year('d.date_day') }} as day_of_year,\n\n {{ dbt_date.week_start('d.date_day') }} as week_start_date,\n {{ dbt_date.week_end('d.date_day') }} as week_end_date,\n {{ dbt_date.week_start('d.prior_year_over_year_date_day') }} as prior_year_week_start_date,\n {{ dbt_date.week_end('d.prior_year_over_year_date_day') }} as prior_year_week_end_date,\n {{ 
dbt_date.week_of_year('d.date_day') }} as week_of_year,\n\n {{ dbt_date.iso_week_start('d.date_day') }} as iso_week_start_date,\n {{ dbt_date.iso_week_end('d.date_day') }} as iso_week_end_date,\n {{ dbt_date.iso_week_start('d.prior_year_over_year_date_day') }} as prior_year_iso_week_start_date,\n {{ dbt_date.iso_week_end('d.prior_year_over_year_date_day') }} as prior_year_iso_week_end_date,\n {{ dbt_date.iso_week_of_year('d.date_day') }} as iso_week_of_year,\n\n {{ dbt_date.week_of_year('d.prior_year_over_year_date_day') }} as prior_year_week_of_year,\n {{ dbt_date.iso_week_of_year('d.prior_year_over_year_date_day') }} as prior_year_iso_week_of_year,\n\n cast({{ dbt_date.date_part('month', 'd.date_day') }} as {{ dbt.type_int() }}) as month_of_year,\n {{ dbt_date.month_name('d.date_day', short=false) }} as month_name,\n {{ dbt_date.month_name('d.date_day', short=true) }} as month_name_short,\n\n cast({{ dbt.date_trunc('month', 'd.date_day') }} as date) as month_start_date,\n cast({{ last_day('d.date_day', 'month') }} as date) as month_end_date,\n\n cast({{ dbt.date_trunc('month', 'd.prior_year_date_day') }} as date) as prior_year_month_start_date,\n cast({{ last_day('d.prior_year_date_day', 'month') }} as date) as prior_year_month_end_date,\n\n cast({{ dbt_date.date_part('quarter', 'd.date_day') }} as {{ dbt.type_int() }}) as quarter_of_year,\n cast({{ dbt.date_trunc('quarter', 'd.date_day') }} as date) as quarter_start_date,\n cast({{ last_day('d.date_day', 'quarter') }} as date) as quarter_end_date,\n\n cast({{ dbt_date.date_part('year', 'd.date_day') }} as {{ dbt.type_int() }}) as year_number,\n cast({{ dbt.date_trunc('year', 'd.date_day') }} as date) as year_start_date,\n cast({{ last_day('d.date_day', 'year') }} as date) as year_end_date\nfrom\n dates_with_prior_year_dates d\norder by 1\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_date.get_base_dates", + "macro.dbt.dateadd", + "macro.dbt_date.yesterday", + "macro.dbt_date.tomorrow", + "macro.dbt_date.day_of_week", + "macro.dbt_date.day_name", + "macro.dbt_date.day_of_month", + "macro.dbt_date.day_of_year", + "macro.dbt_date.week_start", + "macro.dbt_date.week_end", + "macro.dbt_date.week_of_year", + "macro.dbt_date.iso_week_start", + "macro.dbt_date.iso_week_end", + "macro.dbt_date.iso_week_of_year", + "macro.dbt_date.date_part", + "macro.dbt.type_int", + "macro.dbt_date.month_name", + "macro.dbt.date_trunc", + "macro.dbt.last_day" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.499265, + "supported_languages": null + }, + "macro.dbt_date.postgres__get_date_dimension": { + "name": "postgres__get_date_dimension", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/get_date_dimension.sql", + "original_file_path": "macros/get_date_dimension.sql", + "unique_id": "macro.dbt_date.postgres__get_date_dimension", + "macro_sql": "{% macro postgres__get_date_dimension(start_date, end_date) %}\nwith base_dates as (\n {{ dbt_date.get_base_dates(start_date, end_date) }}\n),\ndates_with_prior_year_dates as (\n\n select\n cast(d.date_day as date) as date_day,\n cast({{ dbt.dateadd('year', -1 , 'd.date_day') }} as date) as prior_year_date_day,\n cast({{ dbt.dateadd('day', -364 , 'd.date_day') }} as date) as prior_year_over_year_date_day\n from\n \tbase_dates d\n\n)\nselect\n d.date_day,\n {{ dbt_date.yesterday('d.date_day') }} as prior_date_day,\n {{ dbt_date.tomorrow('d.date_day') }} as 
next_date_day,\n d.prior_year_date_day as prior_year_date_day,\n d.prior_year_over_year_date_day,\n {{ dbt_date.day_of_week('d.date_day', isoweek=true) }} as day_of_week,\n\n {{ dbt_date.day_name('d.date_day', short=false) }} as day_of_week_name,\n {{ dbt_date.day_name('d.date_day', short=true) }} as day_of_week_name_short,\n {{ dbt_date.day_of_month('d.date_day') }} as day_of_month,\n {{ dbt_date.day_of_year('d.date_day') }} as day_of_year,\n\n {{ dbt_date.week_start('d.date_day') }} as week_start_date,\n {{ dbt_date.week_end('d.date_day') }} as week_end_date,\n {{ dbt_date.week_start('d.prior_year_over_year_date_day') }} as prior_year_week_start_date,\n {{ dbt_date.week_end('d.prior_year_over_year_date_day') }} as prior_year_week_end_date,\n {{ dbt_date.week_of_year('d.date_day') }} as week_of_year,\n\n {{ dbt_date.iso_week_start('d.date_day') }} as iso_week_start_date,\n {{ dbt_date.iso_week_end('d.date_day') }} as iso_week_end_date,\n {{ dbt_date.iso_week_start('d.prior_year_over_year_date_day') }} as prior_year_iso_week_start_date,\n {{ dbt_date.iso_week_end('d.prior_year_over_year_date_day') }} as prior_year_iso_week_end_date,\n {{ dbt_date.iso_week_of_year('d.date_day') }} as iso_week_of_year,\n\n {{ dbt_date.week_of_year('d.prior_year_over_year_date_day') }} as prior_year_week_of_year,\n {{ dbt_date.iso_week_of_year('d.prior_year_over_year_date_day') }} as prior_year_iso_week_of_year,\n\n cast({{ dbt_date.date_part('month', 'd.date_day') }} as {{ dbt.type_int() }}) as month_of_year,\n {{ dbt_date.month_name('d.date_day', short=false) }} as month_name,\n {{ dbt_date.month_name('d.date_day', short=true) }} as month_name_short,\n\n cast({{ dbt.date_trunc('month', 'd.date_day') }} as date) as month_start_date,\n cast({{ last_day('d.date_day', 'month') }} as date) as month_end_date,\n\n cast({{ dbt.date_trunc('month', 'd.prior_year_date_day') }} as date) as prior_year_month_start_date,\n cast({{ last_day('d.prior_year_date_day', 'month') }} as date) as prior_year_month_end_date,\n\n cast({{ dbt_date.date_part('quarter', 'd.date_day') }} as {{ dbt.type_int() }}) as quarter_of_year,\n cast({{ dbt.date_trunc('quarter', 'd.date_day') }} as date) as quarter_start_date,\n {# last_day does not support quarter because postgresql does not support quarter interval. 
#}\n cast({{dbt.dateadd('day', '-1', dbt.dateadd('month', '3', dbt.date_trunc('quarter', 'd.date_day')))}} as date) as quarter_end_date,\n\n cast({{ dbt_date.date_part('year', 'd.date_day') }} as {{ dbt.type_int() }}) as year_number,\n cast({{ dbt.date_trunc('year', 'd.date_day') }} as date) as year_start_date,\n cast({{ last_day('d.date_day', 'year') }} as date) as year_end_date\nfrom\n dates_with_prior_year_dates d\norder by 1\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_date.get_base_dates", + "macro.dbt.dateadd", + "macro.dbt_date.yesterday", + "macro.dbt_date.tomorrow", + "macro.dbt_date.day_of_week", + "macro.dbt_date.day_name", + "macro.dbt_date.day_of_month", + "macro.dbt_date.day_of_year", + "macro.dbt_date.week_start", + "macro.dbt_date.week_end", + "macro.dbt_date.week_of_year", + "macro.dbt_date.iso_week_start", + "macro.dbt_date.iso_week_end", + "macro.dbt_date.iso_week_of_year", + "macro.dbt_date.date_part", + "macro.dbt.type_int", + "macro.dbt_date.month_name", + "macro.dbt.date_trunc", + "macro.dbt.last_day" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.501696, + "supported_languages": null + }, + "macro.dbt_date.get_base_dates": { + "name": "get_base_dates", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/get_base_dates.sql", + "original_file_path": "macros/get_base_dates.sql", + "unique_id": "macro.dbt_date.get_base_dates", + "macro_sql": "{% macro get_base_dates(start_date=None, end_date=None, n_dateparts=None, datepart=\"day\") %}\n {{ adapter.dispatch('get_base_dates', 'dbt_date') (start_date, end_date, n_dateparts, datepart) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.default__get_base_dates"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.502968, + "supported_languages": null + }, + "macro.dbt_date.default__get_base_dates": { + "name": "default__get_base_dates", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/get_base_dates.sql", + "original_file_path": "macros/get_base_dates.sql", + "unique_id": "macro.dbt_date.default__get_base_dates", + "macro_sql": "{% macro default__get_base_dates(start_date, end_date, n_dateparts, datepart) %}\n\n{%- if start_date and end_date -%}\n{%- set start_date=\"cast('\" ~ start_date ~ \"' as \" ~ dbt.type_timestamp() ~ \")\" -%}\n{%- set end_date=\"cast('\" ~ end_date ~ \"' as \" ~ dbt.type_timestamp() ~ \")\" -%}\n\n{%- elif n_dateparts and datepart -%}\n\n{%- set start_date = dbt.dateadd(datepart, -1 * n_dateparts, dbt_date.today()) -%}\n{%- set end_date = dbt_date.tomorrow() -%}\n{%- endif -%}\n\nwith date_spine as\n(\n\n {{ dbt_date.date_spine(\n datepart=datepart,\n start_date=start_date,\n end_date=end_date,\n )\n }}\n\n)\nselect\n cast(d.date_{{ datepart }} as {{ dbt.type_timestamp() }}) as date_{{ datepart }}\nfrom\n date_spine d\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.type_timestamp", + "macro.dbt.dateadd", + "macro.dbt_date.today", + "macro.dbt_date.tomorrow", + "macro.dbt_date.date_spine" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.503888, + "supported_languages": null + }, + "macro.dbt_date.bigquery__get_base_dates": { + "name": "bigquery__get_base_dates", + "resource_type": 
"macro", + "package_name": "dbt_date", + "path": "macros/get_base_dates.sql", + "original_file_path": "macros/get_base_dates.sql", + "unique_id": "macro.dbt_date.bigquery__get_base_dates", + "macro_sql": "{% macro bigquery__get_base_dates(start_date, end_date, n_dateparts, datepart) %}\n\n{%- if start_date and end_date -%}\n{%- set start_date=\"cast('\" ~ start_date ~ \"' as date )\" -%}\n{%- set end_date=\"cast('\" ~ end_date ~ \"' as date )\" -%}\n\n{%- elif n_dateparts and datepart -%}\n\n{%- set start_date = dbt.dateadd(datepart, -1 * n_dateparts, dbt_date.today()) -%}\n{%- set end_date = dbt_date.tomorrow() -%}\n{%- endif -%}\n\nwith date_spine as\n(\n\n {{ dbt_date.date_spine(\n datepart=datepart,\n start_date=start_date,\n end_date=end_date,\n )\n }}\n\n)\nselect\n cast(d.date_{{ datepart }} as {{ dbt.type_timestamp() }}) as date_{{ datepart }}\nfrom\n date_spine d\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt.dateadd", + "macro.dbt_date.today", + "macro.dbt_date.tomorrow", + "macro.dbt_date.date_spine", + "macro.dbt.type_timestamp" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.504725, + "supported_languages": null + }, + "macro.dbt_date.get_intervals_between": { + "name": "get_intervals_between", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/_utils/date_spine.sql", + "original_file_path": "macros/_utils/date_spine.sql", + "unique_id": "macro.dbt_date.get_intervals_between", + "macro_sql": "{% macro get_intervals_between(start_date, end_date, datepart) -%}\n {{ return(adapter.dispatch('get_intervals_between', 'dbt_date')(start_date, end_date, datepart)) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.default__get_intervals_between"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.505601, + "supported_languages": null + }, + "macro.dbt_date.default__get_intervals_between": { + "name": "default__get_intervals_between", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/_utils/date_spine.sql", + "original_file_path": "macros/_utils/date_spine.sql", + "unique_id": "macro.dbt_date.default__get_intervals_between", + "macro_sql": "{% macro default__get_intervals_between(start_date, end_date, datepart) -%}\n {%- call statement('get_intervals_between', fetch_result=True) %}\n\n select {{ dbt.datediff(start_date, end_date, datepart) }}\n\n {%- endcall -%}\n\n {%- set value_list = load_result('get_intervals_between') -%}\n\n {%- if value_list and value_list['data'] -%}\n {%- set values = value_list['data'] | map(attribute=0) | list %}\n {{ return(values[0]) }}\n {%- else -%}\n {{ return(1) }}\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement", "macro.dbt.datediff"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.506274, + "supported_languages": null + }, + "macro.dbt_date.date_spine": { + "name": "date_spine", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/_utils/date_spine.sql", + "original_file_path": "macros/_utils/date_spine.sql", + "unique_id": "macro.dbt_date.date_spine", + "macro_sql": "{% macro date_spine(datepart, start_date, end_date) %}\n {{ return(adapter.dispatch('date_spine', 'dbt_date')(datepart, 
start_date, end_date)) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.default__date_spine"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.506527, + "supported_languages": null + }, + "macro.dbt_date.default__date_spine": { + "name": "default__date_spine", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/_utils/date_spine.sql", + "original_file_path": "macros/_utils/date_spine.sql", + "unique_id": "macro.dbt_date.default__date_spine", + "macro_sql": "{% macro default__date_spine(datepart, start_date, end_date) %}\n\n\n{# call as follows:\n\ndate_spine(\n \"day\",\n \"to_date('01/01/2016', 'mm/dd/yyyy')\",\n \"dbt.dateadd(week, 1, current_date)\"\n) #}\n\n\nwith rawdata as (\n\n {{\n dbt_date.generate_series(\n dbt_date.get_intervals_between(start_date, end_date, datepart)\n )\n }}\n\n),\n\nall_periods as (\n\n select (\n {{\n dbt.dateadd(\n datepart,\n \"row_number() over (order by 1) - 1\",\n start_date\n )\n }}\n ) as date_{{datepart}}\n from rawdata\n\n),\n\nfiltered as (\n\n select *\n from all_periods\n where date_{{datepart}} <= {{ end_date }}\n\n)\n\nselect * from filtered\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_date.generate_series", + "macro.dbt_date.get_intervals_between", + "macro.dbt.dateadd" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.506934, + "supported_languages": null + }, + "macro.dbt_date.get_powers_of_two": { + "name": "get_powers_of_two", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/_utils/generate_series.sql", + "original_file_path": "macros/_utils/generate_series.sql", + "unique_id": "macro.dbt_date.get_powers_of_two", + "macro_sql": "{% macro get_powers_of_two(upper_bound) %}\n {{ return(adapter.dispatch('get_powers_of_two', 'dbt_date')(upper_bound)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.default__get_powers_of_two"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.507905, + "supported_languages": null + }, + "macro.dbt_date.default__get_powers_of_two": { + "name": "default__get_powers_of_two", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/_utils/generate_series.sql", + "original_file_path": "macros/_utils/generate_series.sql", + "unique_id": "macro.dbt_date.default__get_powers_of_two", + "macro_sql": "{% macro default__get_powers_of_two(upper_bound) %}\n\n {% if upper_bound <= 0 %}\n {{ exceptions.raise_compiler_error(\"upper bound must be positive\") }}\n {% endif %}\n\n {% for _ in range(1, 100) %}\n {% if upper_bound <= 2 ** loop.index %}{{ return(loop.index) }}{% endif %}\n {% endfor %}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.508366, + "supported_languages": null + }, + "macro.dbt_date.generate_series": { + "name": "generate_series", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/_utils/generate_series.sql", + "original_file_path": "macros/_utils/generate_series.sql", + "unique_id": "macro.dbt_date.generate_series", + "macro_sql": "{% macro generate_series(upper_bound) %}\n {{ 
return(adapter.dispatch('generate_series', 'dbt_date')(upper_bound)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.default__generate_series"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.508574, + "supported_languages": null + }, + "macro.dbt_date.default__generate_series": { + "name": "default__generate_series", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/_utils/generate_series.sql", + "original_file_path": "macros/_utils/generate_series.sql", + "unique_id": "macro.dbt_date.default__generate_series", + "macro_sql": "{% macro default__generate_series(upper_bound) %}\n\n {% set n = dbt_date.get_powers_of_two(upper_bound) %}\n\n with p as (\n select 0 as generated_number union all select 1\n ), unioned as (\n\n select\n\n {% for i in range(n) %}\n p{{i}}.generated_number * power(2, {{i}})\n {% if not loop.last %} + {% endif %}\n {% endfor %}\n + 1\n as generated_number\n\n from\n\n {% for i in range(n) %}\n p as p{{i}}\n {% if not loop.last %} cross join {% endif %}\n {% endfor %}\n\n )\n\n select *\n from unioned\n where generated_number <= {{upper_bound}}\n order by generated_number\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.get_powers_of_two"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.509141, + "supported_languages": null + }, + "macro.dbt_date.get_fiscal_year_dates": { + "name": "get_fiscal_year_dates", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/fiscal_date/get_fiscal_year_dates.sql", + "original_file_path": "macros/fiscal_date/get_fiscal_year_dates.sql", + "unique_id": "macro.dbt_date.get_fiscal_year_dates", + "macro_sql": "{% macro get_fiscal_year_dates(dates, year_end_month=12, week_start_day=1, shift_year=1) %}\n{{ adapter.dispatch('get_fiscal_year_dates', 'dbt_date') (dates, year_end_month, week_start_day, shift_year) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.default__get_fiscal_year_dates"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5111198, + "supported_languages": null + }, + "macro.dbt_date.default__get_fiscal_year_dates": { + "name": "default__get_fiscal_year_dates", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/fiscal_date/get_fiscal_year_dates.sql", + "original_file_path": "macros/fiscal_date/get_fiscal_year_dates.sql", + "unique_id": "macro.dbt_date.default__get_fiscal_year_dates", + "macro_sql": "{% macro default__get_fiscal_year_dates(dates, year_end_month, week_start_day, shift_year) %}\n-- this gets all the dates within a fiscal year\n-- determined by the given year-end-month\n-- ending on the saturday closest to that month's end date\nwith date_dimension as (\n select * from {{ dates }}\n),\nyear_month_end as (\n\n select\n d.year_number - {{ shift_year }} as fiscal_year_number,\n d.month_end_date\n from\n date_dimension d\n where\n d.month_of_year = {{ year_end_month }}\n group by 1,2\n\n),\nweeks as (\n\n select\n d.year_number,\n d.month_of_year,\n d.date_day as week_start_date,\n cast({{ dbt.dateadd('day', 6, 'd.date_day') }} as date) as week_end_date\n from\n date_dimension d\n where\n d.day_of_week = {{ week_start_day }}\n\n),\n-- get all the weeks that start in the month the 
year ends\nyear_week_ends as (\n\n select\n d.year_number - {{ shift_year }} as fiscal_year_number,\n d.week_end_date\n from\n weeks d\n where\n d.month_of_year = {{ year_end_month }}\n group by\n 1,2\n\n),\n-- then calculate which Saturday is closest to month end\nweeks_at_month_end as (\n\n select\n d.fiscal_year_number,\n d.week_end_date,\n m.month_end_date,\n rank() over\n (partition by d.fiscal_year_number\n order by\n abs({{ dbt.datediff('d.week_end_date', 'm.month_end_date', 'day') }})\n\n ) as closest_to_month_end\n from\n year_week_ends d\n join\n year_month_end m on d.fiscal_year_number = m.fiscal_year_number\n),\nfiscal_year_range as (\n\n select\n w.fiscal_year_number,\n cast(\n {{ dbt.dateadd('day', 1,\n 'lag(w.week_end_date) over(order by w.week_end_date)') }}\n as date) as fiscal_year_start_date,\n w.week_end_date as fiscal_year_end_date\n from\n weeks_at_month_end w\n where\n w.closest_to_month_end = 1\n\n),\nfiscal_year_dates as (\n\n select\n d.date_day,\n m.fiscal_year_number,\n m.fiscal_year_start_date,\n m.fiscal_year_end_date,\n w.week_start_date,\n w.week_end_date,\n -- we reset the weeks of the year starting with the merch year start date\n dense_rank()\n over(\n partition by m.fiscal_year_number\n order by w.week_start_date\n ) as fiscal_week_of_year\n from\n date_dimension d\n join\n fiscal_year_range m on d.date_day between m.fiscal_year_start_date and m.fiscal_year_end_date\n join\n weeks w on d.date_day between w.week_start_date and w.week_end_date\n\n)\nselect * from fiscal_year_dates order by 1\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.dateadd", "macro.dbt.datediff"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5119438, + "supported_languages": null + }, + "macro.dbt_date.get_fiscal_periods": { + "name": "get_fiscal_periods", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/fiscal_date/get_fiscal_periods.sql", + "original_file_path": "macros/fiscal_date/get_fiscal_periods.sql", + "unique_id": "macro.dbt_date.get_fiscal_periods", + "macro_sql": "{% macro get_fiscal_periods(dates, year_end_month, week_start_day, shift_year=1) %}\n{#\nThis macro requires you to pass in a ref to a date dimension, created via\ndbt_date.get_date_dimension()s\n#}\nwith fscl_year_dates_for_periods as (\n {{ dbt_date.get_fiscal_year_dates(dates, year_end_month, week_start_day, shift_year) }}\n),\nfscl_year_w13 as (\n\n select\n f.*,\n -- We count the weeks in a 13 week period\n -- and separate the 4-5-4 week sequences\n mod(cast(\n (f.fiscal_week_of_year-1) as {{ dbt.type_int() }}\n ), 13) as w13_number,\n -- Chop weeks into 13 week merch quarters\n cast(\n least(\n floor((f.fiscal_week_of_year-1)/13.0)\n , 3)\n as {{ dbt.type_int() }}) as quarter_number\n from\n fscl_year_dates_for_periods f\n\n),\nfscl_periods as (\n\n select\n f.date_day,\n f.fiscal_year_number,\n f.week_start_date,\n f.week_end_date,\n f.fiscal_week_of_year,\n case\n -- we move week 53 into the 3rd period of the quarter\n when f.fiscal_week_of_year = 53 then 3\n when f.w13_number between 0 and 3 then 1\n when f.w13_number between 4 and 8 then 2\n when f.w13_number between 9 and 12 then 3\n end as period_of_quarter,\n f.quarter_number\n from\n fscl_year_w13 f\n\n),\nfscl_periods_quarters as (\n\n select\n f.*,\n cast((\n (f.quarter_number * 3) + f.period_of_quarter\n ) as {{ dbt.type_int() }}) as fiscal_period_number\n from\n fscl_periods f\n\n)\nselect\n 
date_day,\n fiscal_year_number,\n week_start_date,\n week_end_date,\n fiscal_week_of_year,\n dense_rank() over(partition by fiscal_period_number order by fiscal_week_of_year) as fiscal_week_of_period,\n fiscal_period_number,\n quarter_number+1 as fiscal_quarter_number,\n period_of_quarter as fiscal_period_of_quarter\nfrom\n fscl_periods_quarters\norder by 1,2\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.get_fiscal_year_dates", "macro.dbt.type_int"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.513084, + "supported_languages": null + }, + "macro.dbt_date.tomorrow": { + "name": "tomorrow", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/tomorrow.sql", + "original_file_path": "macros/calendar_date/tomorrow.sql", + "unique_id": "macro.dbt_date.tomorrow", + "macro_sql": "{%- macro tomorrow(date=None, tz=None) -%}\n{{ dbt_date.n_days_away(1, date, tz) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_date.n_days_away"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.513324, + "supported_languages": null + }, + "macro.dbt_date.next_week": { + "name": "next_week", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/next_week.sql", + "original_file_path": "macros/calendar_date/next_week.sql", + "unique_id": "macro.dbt_date.next_week", + "macro_sql": "{%- macro next_week(tz=None) -%}\n{{ dbt_date.n_weeks_away(1, tz) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_date.n_weeks_away"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5135171, + "supported_languages": null + }, + "macro.dbt_date.next_month_name": { + "name": "next_month_name", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/next_month_name.sql", + "original_file_path": "macros/calendar_date/next_month_name.sql", + "unique_id": "macro.dbt_date.next_month_name", + "macro_sql": "{%- macro next_month_name(short=True, tz=None) -%}\n{{ dbt_date.month_name(dbt_date.next_month(tz), short=short) }}\n{%- endmacro -%}", + "depends_on": { + "macros": ["macro.dbt_date.month_name", "macro.dbt_date.next_month"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.513794, + "supported_languages": null + }, + "macro.dbt_date.next_month": { + "name": "next_month", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/next_month.sql", + "original_file_path": "macros/calendar_date/next_month.sql", + "unique_id": "macro.dbt_date.next_month", + "macro_sql": "{%- macro next_month(tz=None) -%}\n{{ dbt_date.n_months_away(1, tz) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_date.n_months_away"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5140018, + "supported_languages": null + }, + "macro.dbt_date.day_name": { + "name": "day_name", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_name.sql", + "original_file_path": "macros/calendar_date/day_name.sql", + "unique_id": 
"macro.dbt_date.day_name", + "macro_sql": "{%- macro day_name(date, short=True) -%}\n {{ adapter.dispatch('day_name', 'dbt_date') (date, short) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.snowflake__day_name"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.514669, + "supported_languages": null + }, + "macro.dbt_date.default__day_name": { + "name": "default__day_name", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_name.sql", + "original_file_path": "macros/calendar_date/day_name.sql", + "unique_id": "macro.dbt_date.default__day_name", + "macro_sql": "\n\n{%- macro default__day_name(date, short) -%}\n{%- set f = 'Dy' if short else 'Day' -%}\n to_char({{ date }}, '{{ f }}')\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.514884, + "supported_languages": null + }, + "macro.dbt_date.snowflake__day_name": { + "name": "snowflake__day_name", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_name.sql", + "original_file_path": "macros/calendar_date/day_name.sql", + "unique_id": "macro.dbt_date.snowflake__day_name", + "macro_sql": "\n\n{%- macro snowflake__day_name(date, short) -%}\n {%- if short -%}\n dayname({{ date }})\n {%- else -%}\n -- long version not implemented on Snowflake so we're doing it manually :/\n case dayname({{ date }})\n when 'Mon' then 'Monday'\n when 'Tue' then 'Tuesday'\n when 'Wed' then 'Wednesday'\n when 'Thu' then 'Thursday'\n when 'Fri' then 'Friday'\n when 'Sat' then 'Saturday'\n when 'Sun' then 'Sunday'\n end\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.515106, + "supported_languages": null + }, + "macro.dbt_date.bigquery__day_name": { + "name": "bigquery__day_name", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_name.sql", + "original_file_path": "macros/calendar_date/day_name.sql", + "unique_id": "macro.dbt_date.bigquery__day_name", + "macro_sql": "\n\n{%- macro bigquery__day_name(date, short) -%}\n{%- set f = '%a' if short else '%A' -%}\n format_date('{{ f }}', cast({{ date }} as date))\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.515423, + "supported_languages": null + }, + "macro.dbt_date.postgres__day_name": { + "name": "postgres__day_name", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_name.sql", + "original_file_path": "macros/calendar_date/day_name.sql", + "unique_id": "macro.dbt_date.postgres__day_name", + "macro_sql": "\n\n{%- macro postgres__day_name(date, short) -%}\n{# FM = Fill mode, which suppresses padding blanks #}\n{%- set f = 'FMDy' if short else 'FMDay' -%}\n to_char({{ date }}, '{{ f }}')\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.515646, + "supported_languages": null + }, + 
"macro.dbt_date.to_unixtimestamp": { + "name": "to_unixtimestamp", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/to_unixtimestamp.sql", + "original_file_path": "macros/calendar_date/to_unixtimestamp.sql", + "unique_id": "macro.dbt_date.to_unixtimestamp", + "macro_sql": "{%- macro to_unixtimestamp(timestamp) -%}\n {{ adapter.dispatch('to_unixtimestamp', 'dbt_date') (timestamp) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.snowflake__to_unixtimestamp"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5161002, + "supported_languages": null + }, + "macro.dbt_date.default__to_unixtimestamp": { + "name": "default__to_unixtimestamp", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/to_unixtimestamp.sql", + "original_file_path": "macros/calendar_date/to_unixtimestamp.sql", + "unique_id": "macro.dbt_date.default__to_unixtimestamp", + "macro_sql": "\n\n{%- macro default__to_unixtimestamp(timestamp) -%}\n {{ dbt_date.date_part('epoch', timestamp) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.date_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.516276, + "supported_languages": null + }, + "macro.dbt_date.snowflake__to_unixtimestamp": { + "name": "snowflake__to_unixtimestamp", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/to_unixtimestamp.sql", + "original_file_path": "macros/calendar_date/to_unixtimestamp.sql", + "unique_id": "macro.dbt_date.snowflake__to_unixtimestamp", + "macro_sql": "\n\n{%- macro snowflake__to_unixtimestamp(timestamp) -%}\n {{ dbt_date.date_part('epoch_seconds', timestamp) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.date_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5164402, + "supported_languages": null + }, + "macro.dbt_date.bigquery__to_unixtimestamp": { + "name": "bigquery__to_unixtimestamp", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/to_unixtimestamp.sql", + "original_file_path": "macros/calendar_date/to_unixtimestamp.sql", + "unique_id": "macro.dbt_date.bigquery__to_unixtimestamp", + "macro_sql": "\n\n{%- macro bigquery__to_unixtimestamp(timestamp) -%}\n unix_seconds({{ timestamp }})\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.516555, + "supported_languages": null + }, + "macro.dbt_date.n_days_away": { + "name": "n_days_away", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/n_days_away.sql", + "original_file_path": "macros/calendar_date/n_days_away.sql", + "unique_id": "macro.dbt_date.n_days_away", + "macro_sql": "{%- macro n_days_away(n, date=None, tz=None) -%}\n{{ dbt_date.n_days_ago(-1 * n, date, tz) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_date.n_days_ago"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.51683, + "supported_languages": null + }, + "macro.dbt_date.week_start": { + 
"name": "week_start", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_start.sql", + "original_file_path": "macros/calendar_date/week_start.sql", + "unique_id": "macro.dbt_date.week_start", + "macro_sql": "{%- macro week_start(date=None, tz=None) -%}\n{%-set dt = date if date else dbt_date.today(tz) -%}\n{{ adapter.dispatch('week_start', 'dbt_date') (dt) }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": [ + "macro.dbt_date.today", + "macro.dbt_date.snowflake__week_start" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.517345, + "supported_languages": null + }, + "macro.dbt_date.default__week_start": { + "name": "default__week_start", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_start.sql", + "original_file_path": "macros/calendar_date/week_start.sql", + "unique_id": "macro.dbt_date.default__week_start", + "macro_sql": "{%- macro default__week_start(date) -%}\ncast({{ dbt.date_trunc('week', date) }} as date)\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.date_trunc"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.517514, + "supported_languages": null + }, + "macro.dbt_date.snowflake__week_start": { + "name": "snowflake__week_start", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_start.sql", + "original_file_path": "macros/calendar_date/week_start.sql", + "unique_id": "macro.dbt_date.snowflake__week_start", + "macro_sql": "\n\n{%- macro snowflake__week_start(date) -%}\n {#\n Get the day of week offset: e.g. 
if the date is a Sunday,\n dbt_date.day_of_week returns 1, so we subtract 1 to get a 0 offset\n #}\n {% set off_set = dbt_date.day_of_week(date, isoweek=False) ~ \" - 1\" %}\n cast({{ dbt.dateadd(\"day\", \"-1 * (\" ~ off_set ~ \")\", date) }} as date)\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.day_of_week", "macro.dbt.dateadd"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.517827, + "supported_languages": null + }, + "macro.dbt_date.postgres__week_start": { + "name": "postgres__week_start", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_start.sql", + "original_file_path": "macros/calendar_date/week_start.sql", + "unique_id": "macro.dbt_date.postgres__week_start", + "macro_sql": "\n\n{%- macro postgres__week_start(date) -%}\n-- Sunday as week start date\ncast({{ dbt.dateadd('day', -1, dbt.date_trunc('week', dbt.dateadd('day', 1, date))) }} as date)\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.dateadd", "macro.dbt.date_trunc"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.518107, + "supported_languages": null + }, + "macro.dbt_date.iso_week_start": { + "name": "iso_week_start", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_start.sql", + "original_file_path": "macros/calendar_date/iso_week_start.sql", + "unique_id": "macro.dbt_date.iso_week_start", + "macro_sql": "{%- macro iso_week_start(date=None, tz=None) -%}\n{%-set dt = date if date else dbt_date.today(tz) -%}\n{{ adapter.dispatch('iso_week_start', 'dbt_date') (dt) }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": [ + "macro.dbt_date.today", + "macro.dbt_date.snowflake__iso_week_start" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.518618, + "supported_languages": null + }, + "macro.dbt_date._iso_week_start": { + "name": "_iso_week_start", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_start.sql", + "original_file_path": "macros/calendar_date/iso_week_start.sql", + "unique_id": "macro.dbt_date._iso_week_start", + "macro_sql": "{%- macro _iso_week_start(date, week_type) -%}\ncast({{ dbt.date_trunc(week_type, date) }} as date)\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.date_trunc"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5187871, + "supported_languages": null + }, + "macro.dbt_date.default__iso_week_start": { + "name": "default__iso_week_start", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_start.sql", + "original_file_path": "macros/calendar_date/iso_week_start.sql", + "unique_id": "macro.dbt_date.default__iso_week_start", + "macro_sql": "\n\n{%- macro default__iso_week_start(date) -%}\n{{ dbt_date._iso_week_start(date, 'isoweek') }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date._iso_week_start"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.518935, + "supported_languages": null + }, + 
"macro.dbt_date.snowflake__iso_week_start": { + "name": "snowflake__iso_week_start", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_start.sql", + "original_file_path": "macros/calendar_date/iso_week_start.sql", + "unique_id": "macro.dbt_date.snowflake__iso_week_start", + "macro_sql": "\n\n{%- macro snowflake__iso_week_start(date) -%}\n{{ dbt_date._iso_week_start(date, 'week') }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date._iso_week_start"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.519082, + "supported_languages": null + }, + "macro.dbt_date.postgres__iso_week_start": { + "name": "postgres__iso_week_start", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_start.sql", + "original_file_path": "macros/calendar_date/iso_week_start.sql", + "unique_id": "macro.dbt_date.postgres__iso_week_start", + "macro_sql": "\n\n{%- macro postgres__iso_week_start(date) -%}\n{{ dbt_date._iso_week_start(date, 'week') }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date._iso_week_start"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.519236, + "supported_languages": null + }, + "macro.dbt_date.n_days_ago": { + "name": "n_days_ago", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/n_days_ago.sql", + "original_file_path": "macros/calendar_date/n_days_ago.sql", + "unique_id": "macro.dbt_date.n_days_ago", + "macro_sql": "{%- macro n_days_ago(n, date=None, tz=None) -%}\n{%-set dt = date if date else dbt_date.today(tz) -%}\n{%- set n = n|int -%}\ncast({{ dbt.dateadd('day', -1 * n, dt) }} as date)\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_date.today", "macro.dbt.dateadd"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.519659, + "supported_languages": null + }, + "macro.dbt_date.last_week": { + "name": "last_week", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/last_week.sql", + "original_file_path": "macros/calendar_date/last_week.sql", + "unique_id": "macro.dbt_date.last_week", + "macro_sql": "{%- macro last_week(tz=None) -%}\n{{ dbt_date.n_weeks_ago(1, tz) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_date.n_weeks_ago"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.519865, + "supported_languages": null + }, + "macro.dbt_date.now": { + "name": "now", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/now.sql", + "original_file_path": "macros/calendar_date/now.sql", + "unique_id": "macro.dbt_date.now", + "macro_sql": "{%- macro now(tz=None) -%}\n{{ dbt_date.convert_timezone(dbt.current_timestamp(), tz) }}\n{%- endmacro -%}", + "depends_on": { + "macros": [ + "macro.dbt_date.convert_timezone", + "macro.dbt.current_timestamp" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.520082, + "supported_languages": null + }, + "macro.dbt_date.periods_since": { + "name": "periods_since", + "resource_type": 
"macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/periods_since.sql", + "original_file_path": "macros/calendar_date/periods_since.sql", + "unique_id": "macro.dbt_date.periods_since", + "macro_sql": "{%- macro periods_since(date_col, period_name='day', tz=None) -%}\n{{ dbt.datediff(date_col, dbt_date.now(tz), period_name) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt.datediff", "macro.dbt_date.now"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5203578, + "supported_languages": null + }, + "macro.dbt_date.today": { + "name": "today", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/today.sql", + "original_file_path": "macros/calendar_date/today.sql", + "unique_id": "macro.dbt_date.today", + "macro_sql": "{%- macro today(tz=None) -%}\ncast({{ dbt_date.now(tz) }} as date)\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_date.now"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.520545, + "supported_languages": null + }, + "macro.dbt_date.last_month": { + "name": "last_month", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/last_month.sql", + "original_file_path": "macros/calendar_date/last_month.sql", + "unique_id": "macro.dbt_date.last_month", + "macro_sql": "{%- macro last_month(tz=None) -%}\n{{ dbt_date.n_months_ago(1, tz) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_date.n_months_ago"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.520736, + "supported_languages": null + }, + "macro.dbt_date.day_of_year": { + "name": "day_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_year.sql", + "original_file_path": "macros/calendar_date/day_of_year.sql", + "unique_id": "macro.dbt_date.day_of_year", + "macro_sql": "{%- macro day_of_year(date) -%}\n{{ adapter.dispatch('day_of_year', 'dbt_date') (date) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.default__day_of_year"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.52107, + "supported_languages": null + }, + "macro.dbt_date.default__day_of_year": { + "name": "default__day_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_year.sql", + "original_file_path": "macros/calendar_date/day_of_year.sql", + "unique_id": "macro.dbt_date.default__day_of_year", + "macro_sql": "\n\n{%- macro default__day_of_year(date) -%}\n {{ dbt_date.date_part('dayofyear', date) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.date_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.521333, + "supported_languages": null + }, + "macro.dbt_date.postgres__day_of_year": { + "name": "postgres__day_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_year.sql", + "original_file_path": "macros/calendar_date/day_of_year.sql", + "unique_id": "macro.dbt_date.postgres__day_of_year", + "macro_sql": "\n\n{%- macro 
postgres__day_of_year(date) -%}\n {{ dbt_date.date_part('doy', date) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.date_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.521487, + "supported_languages": null + }, + "macro.dbt_date.redshift__day_of_year": { + "name": "redshift__day_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_year.sql", + "original_file_path": "macros/calendar_date/day_of_year.sql", + "unique_id": "macro.dbt_date.redshift__day_of_year", + "macro_sql": "\n\n{%- macro redshift__day_of_year(date) -%}\n cast({{ dbt_date.date_part('dayofyear', date) }} as {{ dbt.type_bigint() }})\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.date_part", "macro.dbt.type_bigint"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.521684, + "supported_languages": null + }, + "macro.dbt_date.round_timestamp": { + "name": "round_timestamp", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/round_timestamp.sql", + "original_file_path": "macros/calendar_date/round_timestamp.sql", + "unique_id": "macro.dbt_date.round_timestamp", + "macro_sql": "{% macro round_timestamp(timestamp) %}\n {{ dbt.date_trunc(\"day\", dbt.dateadd(\"hour\", 12, timestamp)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.date_trunc", "macro.dbt.dateadd"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.521953, + "supported_languages": null + }, + "macro.dbt_date.from_unixtimestamp": { + "name": "from_unixtimestamp", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/from_unixtimestamp.sql", + "original_file_path": "macros/calendar_date/from_unixtimestamp.sql", + "unique_id": "macro.dbt_date.from_unixtimestamp", + "macro_sql": "{%- macro from_unixtimestamp(epochs, format=\"seconds\") -%}\n {{ adapter.dispatch('from_unixtimestamp', 'dbt_date') (epochs, format) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.snowflake__from_unixtimestamp"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.523566, + "supported_languages": null + }, + "macro.dbt_date.default__from_unixtimestamp": { + "name": "default__from_unixtimestamp", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/from_unixtimestamp.sql", + "original_file_path": "macros/calendar_date/from_unixtimestamp.sql", + "unique_id": "macro.dbt_date.default__from_unixtimestamp", + "macro_sql": "\n\n{%- macro default__from_unixtimestamp(epochs, format=\"seconds\") -%}\n {%- if format != \"seconds\" -%}\n {{ exceptions.raise_compiler_error(\n \"value \" ~ format ~ \" for `format` for from_unixtimestamp is not supported.\"\n )\n }}\n {% endif -%}\n to_timestamp({{ epochs }})\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.523894, + "supported_languages": null + }, + "macro.dbt_date.postgres__from_unixtimestamp": { + "name": "postgres__from_unixtimestamp", + 
"resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/from_unixtimestamp.sql", + "original_file_path": "macros/calendar_date/from_unixtimestamp.sql", + "unique_id": "macro.dbt_date.postgres__from_unixtimestamp", + "macro_sql": "\n\n{%- macro postgres__from_unixtimestamp(epochs, format=\"seconds\") -%}\n {%- if format != \"seconds\" -%}\n {{ exceptions.raise_compiler_error(\n \"value \" ~ format ~ \" for `format` for from_unixtimestamp is not supported.\"\n )\n }}\n {% endif -%}\n cast(to_timestamp({{ epochs }}) at time zone 'UTC' as timestamp)\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.52433, + "supported_languages": null + }, + "macro.dbt_date.snowflake__from_unixtimestamp": { + "name": "snowflake__from_unixtimestamp", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/from_unixtimestamp.sql", + "original_file_path": "macros/calendar_date/from_unixtimestamp.sql", + "unique_id": "macro.dbt_date.snowflake__from_unixtimestamp", + "macro_sql": "\n\n{%- macro snowflake__from_unixtimestamp(epochs, format) -%}\n {%- if format == \"seconds\" -%}\n {%- set scale = 0 -%}\n {%- elif format == \"milliseconds\" -%}\n {%- set scale = 3 -%}\n {%- elif format == \"microseconds\" -%}\n {%- set scale = 6 -%}\n {%- else -%}\n {{ exceptions.raise_compiler_error(\n \"value \" ~ format ~ \" for `format` for from_unixtimestamp is not supported.\"\n )\n }}\n {% endif -%}\n to_timestamp_ntz({{ epochs }}, {{ scale }})\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5248692, + "supported_languages": null + }, + "macro.dbt_date.bigquery__from_unixtimestamp": { + "name": "bigquery__from_unixtimestamp", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/from_unixtimestamp.sql", + "original_file_path": "macros/calendar_date/from_unixtimestamp.sql", + "unique_id": "macro.dbt_date.bigquery__from_unixtimestamp", + "macro_sql": "\n\n{%- macro bigquery__from_unixtimestamp(epochs, format) -%}\n {%- if format == \"seconds\" -%}\n timestamp_seconds({{ epochs }})\n {%- elif format == \"milliseconds\" -%}\n timestamp_millis({{ epochs }})\n {%- elif format == \"microseconds\" -%}\n timestamp_micros({{ epochs }})\n {%- else -%}\n {{ exceptions.raise_compiler_error(\n \"value \" ~ format ~ \" for `format` for from_unixtimestamp is not supported.\"\n )\n }}\n {% endif -%}\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.525286, + "supported_languages": null + }, + "macro.dbt_date.n_months_ago": { + "name": "n_months_ago", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/n_months_ago.sql", + "original_file_path": "macros/calendar_date/n_months_ago.sql", + "unique_id": "macro.dbt_date.n_months_ago", + "macro_sql": "{%- macro n_months_ago(n, tz=None) -%}\n{%- set n = n|int -%}\n{{ dbt.date_trunc('month',\n dbt.dateadd('month', -1 * n,\n dbt_date.today(tz)\n )\n ) }}\n{%- endmacro -%}", + "depends_on": { + "macros": [ + "macro.dbt.date_trunc", + "macro.dbt.dateadd", + "macro.dbt_date.today" + ] + }, + "description": "", + 
"meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.525692, + "supported_languages": null + }, + "macro.dbt_date.date_part": { + "name": "date_part", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/date_part.sql", + "original_file_path": "macros/calendar_date/date_part.sql", + "unique_id": "macro.dbt_date.date_part", + "macro_sql": "{% macro date_part(datepart, date) -%}\n {{ adapter.dispatch('date_part', 'dbt_date') (datepart, date) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.default__date_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.526002, + "supported_languages": null + }, + "macro.dbt_date.default__date_part": { + "name": "default__date_part", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/date_part.sql", + "original_file_path": "macros/calendar_date/date_part.sql", + "unique_id": "macro.dbt_date.default__date_part", + "macro_sql": "{% macro default__date_part(datepart, date) -%}\n date_part('{{ datepart }}', {{ date }})\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.52614, + "supported_languages": null + }, + "macro.dbt_date.bigquery__date_part": { + "name": "bigquery__date_part", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/date_part.sql", + "original_file_path": "macros/calendar_date/date_part.sql", + "unique_id": "macro.dbt_date.bigquery__date_part", + "macro_sql": "{% macro bigquery__date_part(datepart, date) -%}\n extract({{ datepart }} from {{ date }})\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.526274, + "supported_languages": null + }, + "macro.dbt_date.n_weeks_away": { + "name": "n_weeks_away", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/n_weeks_away.sql", + "original_file_path": "macros/calendar_date/n_weeks_away.sql", + "unique_id": "macro.dbt_date.n_weeks_away", + "macro_sql": "{%- macro n_weeks_away(n, tz=None) -%}\n{%- set n = n|int -%}\n{{ dbt.date_trunc('week',\n dbt.dateadd('week', n,\n dbt_date.today(tz)\n )\n ) }}\n{%- endmacro -%}", + "depends_on": { + "macros": [ + "macro.dbt.date_trunc", + "macro.dbt.dateadd", + "macro.dbt_date.today" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.52663, + "supported_languages": null + }, + "macro.dbt_date.day_of_month": { + "name": "day_of_month", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_month.sql", + "original_file_path": "macros/calendar_date/day_of_month.sql", + "unique_id": "macro.dbt_date.day_of_month", + "macro_sql": "{%- macro day_of_month(date) -%}\n{{ dbt_date.date_part('day', date) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.date_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.526848, + "supported_languages": null + }, + 
"macro.dbt_date.redshift__day_of_month": { + "name": "redshift__day_of_month", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_month.sql", + "original_file_path": "macros/calendar_date/day_of_month.sql", + "unique_id": "macro.dbt_date.redshift__day_of_month", + "macro_sql": "\n\n{%- macro redshift__day_of_month(date) -%}\ncast({{ dbt_date.date_part('day', date) }} as {{ dbt.type_bigint() }})\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.date_part", "macro.dbt.type_bigint"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.527036, + "supported_languages": null + }, + "macro.dbt_date.yesterday": { + "name": "yesterday", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/yesterday.sql", + "original_file_path": "macros/calendar_date/yesterday.sql", + "unique_id": "macro.dbt_date.yesterday", + "macro_sql": "{%- macro yesterday(date=None, tz=None) -%}\n{{ dbt_date.n_days_ago(1, date, tz) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_date.n_days_ago"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5272572, + "supported_languages": null + }, + "macro.dbt_date.day_of_week": { + "name": "day_of_week", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_week.sql", + "original_file_path": "macros/calendar_date/day_of_week.sql", + "unique_id": "macro.dbt_date.day_of_week", + "macro_sql": "{%- macro day_of_week(date, isoweek=true) -%}\n{{ adapter.dispatch('day_of_week', 'dbt_date') (date, isoweek) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.snowflake__day_of_week"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5290802, + "supported_languages": null + }, + "macro.dbt_date.default__day_of_week": { + "name": "default__day_of_week", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_week.sql", + "original_file_path": "macros/calendar_date/day_of_week.sql", + "unique_id": "macro.dbt_date.default__day_of_week", + "macro_sql": "\n\n{%- macro default__day_of_week(date, isoweek) -%}\n\n {%- set dow = dbt_date.date_part('dayofweek', date) -%}\n\n {%- if isoweek -%}\n case\n -- Shift start of week from Sunday (0) to Monday (1)\n when {{ dow }} = 0 then 7\n else {{ dow }}\n end\n {%- else -%}\n {{ dow }} + 1\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.date_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5293992, + "supported_languages": null + }, + "macro.dbt_date.snowflake__day_of_week": { + "name": "snowflake__day_of_week", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_week.sql", + "original_file_path": "macros/calendar_date/day_of_week.sql", + "unique_id": "macro.dbt_date.snowflake__day_of_week", + "macro_sql": "\n\n{%- macro snowflake__day_of_week(date, isoweek) -%}\n\n {%- if isoweek -%}\n {%- set dow_part = 'dayofweekiso' -%}\n {{ dbt_date.date_part(dow_part, date) }}\n {%- else -%}\n {%- set dow_part = 'dayofweek' -%}\n case\n when {{ 
dbt_date.date_part(dow_part, date) }} = 7 then 1\n else {{ dbt_date.date_part(dow_part, date) }} + 1\n end\n {%- endif -%}\n\n\n\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.date_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.529827, + "supported_languages": null + }, + "macro.dbt_date.bigquery__day_of_week": { + "name": "bigquery__day_of_week", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_week.sql", + "original_file_path": "macros/calendar_date/day_of_week.sql", + "unique_id": "macro.dbt_date.bigquery__day_of_week", + "macro_sql": "\n\n{%- macro bigquery__day_of_week(date, isoweek) -%}\n\n {%- set dow = dbt_date.date_part('dayofweek', date) -%}\n\n {%- if isoweek -%}\n case\n -- Shift start of week from Sunday (1) to Monday (2)\n when {{ dow }} = 1 then 7\n else {{ dow }} - 1\n end\n {%- else -%}\n {{ dow }}\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.date_part"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.530139, + "supported_languages": null + }, + "macro.dbt_date.postgres__day_of_week": { + "name": "postgres__day_of_week", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_week.sql", + "original_file_path": "macros/calendar_date/day_of_week.sql", + "unique_id": "macro.dbt_date.postgres__day_of_week", + "macro_sql": "\n\n\n{%- macro postgres__day_of_week(date, isoweek) -%}\n\n {%- if isoweek -%}\n {%- set dow_part = 'isodow' -%}\n -- Monday(1) to Sunday (7)\n cast({{ dbt_date.date_part(dow_part, date) }} as {{ dbt.type_int() }})\n {%- else -%}\n {%- set dow_part = 'dow' -%}\n -- Sunday(1) to Saturday (7)\n cast({{ dbt_date.date_part(dow_part, date) }} + 1 as {{ dbt.type_int() }})\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.date_part", "macro.dbt.type_int"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.530682, + "supported_languages": null + }, + "macro.dbt_date.redshift__day_of_week": { + "name": "redshift__day_of_week", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/day_of_week.sql", + "original_file_path": "macros/calendar_date/day_of_week.sql", + "unique_id": "macro.dbt_date.redshift__day_of_week", + "macro_sql": "\n\n\n{%- macro redshift__day_of_week(date, isoweek) -%}\n\n {%- set dow = dbt_date.date_part('dayofweek', date) -%}\n\n {%- if isoweek -%}\n case\n -- Shift start of week from Sunday (0) to Monday (1)\n when {{ dow }} = 0 then 7\n else cast({{ dow }} as {{ dbt.type_bigint() }})\n end\n {%- else -%}\n cast({{ dow }} + 1 as {{ dbt.type_bigint() }})\n {%- endif -%}\n\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.date_part", "macro.dbt.type_bigint"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.531071, + "supported_languages": null + }, + "macro.dbt_date.iso_week_end": { + "name": "iso_week_end", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_end.sql", + "original_file_path": "macros/calendar_date/iso_week_end.sql", + "unique_id": 
"macro.dbt_date.iso_week_end", + "macro_sql": "{%- macro iso_week_end(date=None, tz=None) -%}\n{%-set dt = date if date else dbt_date.today(tz) -%}\n{{ adapter.dispatch('iso_week_end', 'dbt_date') (dt) }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": [ + "macro.dbt_date.today", + "macro.dbt_date.snowflake__iso_week_end" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5315561, + "supported_languages": null + }, + "macro.dbt_date._iso_week_end": { + "name": "_iso_week_end", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_end.sql", + "original_file_path": "macros/calendar_date/iso_week_end.sql", + "unique_id": "macro.dbt_date._iso_week_end", + "macro_sql": "{%- macro _iso_week_end(date, week_type) -%}\n{%- set dt = dbt_date.iso_week_start(date) -%}\n{{ dbt_date.n_days_away(6, dt) }}\n{%- endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_date.iso_week_start", + "macro.dbt_date.n_days_away" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.531778, + "supported_languages": null + }, + "macro.dbt_date.default__iso_week_end": { + "name": "default__iso_week_end", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_end.sql", + "original_file_path": "macros/calendar_date/iso_week_end.sql", + "unique_id": "macro.dbt_date.default__iso_week_end", + "macro_sql": "\n\n{%- macro default__iso_week_end(date) -%}\n{{ dbt_date._iso_week_end(date, 'isoweek') }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date._iso_week_end"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.531924, + "supported_languages": null + }, + "macro.dbt_date.snowflake__iso_week_end": { + "name": "snowflake__iso_week_end", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_end.sql", + "original_file_path": "macros/calendar_date/iso_week_end.sql", + "unique_id": "macro.dbt_date.snowflake__iso_week_end", + "macro_sql": "\n\n{%- macro snowflake__iso_week_end(date) -%}\n{{ dbt_date._iso_week_end(date, 'weekiso') }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date._iso_week_end"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.532064, + "supported_languages": null + }, + "macro.dbt_date.n_weeks_ago": { + "name": "n_weeks_ago", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/n_weeks_ago.sql", + "original_file_path": "macros/calendar_date/n_weeks_ago.sql", + "unique_id": "macro.dbt_date.n_weeks_ago", + "macro_sql": "{%- macro n_weeks_ago(n, tz=None) -%}\n{%- set n = n|int -%}\n{{ dbt.date_trunc('week',\n dbt.dateadd('week', -1 * n,\n dbt_date.today(tz)\n )\n ) }}\n{%- endmacro -%}", + "depends_on": { + "macros": [ + "macro.dbt.date_trunc", + "macro.dbt.dateadd", + "macro.dbt_date.today" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5325592, + "supported_languages": null + }, + "macro.dbt_date.month_name": { + "name": "month_name", + "resource_type": "macro", + "package_name": 
"dbt_date", + "path": "macros/calendar_date/month_name.sql", + "original_file_path": "macros/calendar_date/month_name.sql", + "unique_id": "macro.dbt_date.month_name", + "macro_sql": "{%- macro month_name(date, short=True) -%}\n {{ adapter.dispatch('month_name', 'dbt_date') (date, short) }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date.snowflake__month_name"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5331998, + "supported_languages": null + }, + "macro.dbt_date.default__month_name": { + "name": "default__month_name", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/month_name.sql", + "original_file_path": "macros/calendar_date/month_name.sql", + "unique_id": "macro.dbt_date.default__month_name", + "macro_sql": "\n\n{%- macro default__month_name(date, short) -%}\n{%- set f = 'MON' if short else 'MONTH' -%}\n to_char({{ date }}, '{{ f }}')\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.533452, + "supported_languages": null + }, + "macro.dbt_date.bigquery__month_name": { + "name": "bigquery__month_name", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/month_name.sql", + "original_file_path": "macros/calendar_date/month_name.sql", + "unique_id": "macro.dbt_date.bigquery__month_name", + "macro_sql": "\n\n{%- macro bigquery__month_name(date, short) -%}\n{%- set f = '%b' if short else '%B' -%}\n format_date('{{ f }}', cast({{ date }} as date))\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5336692, + "supported_languages": null + }, + "macro.dbt_date.snowflake__month_name": { + "name": "snowflake__month_name", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/month_name.sql", + "original_file_path": "macros/calendar_date/month_name.sql", + "unique_id": "macro.dbt_date.snowflake__month_name", + "macro_sql": "\n\n{%- macro snowflake__month_name(date, short) -%}\n{%- set f = 'MON' if short else 'MMMM' -%}\n to_char({{ date }}, '{{ f }}')\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5338829, + "supported_languages": null + }, + "macro.dbt_date.postgres__month_name": { + "name": "postgres__month_name", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/month_name.sql", + "original_file_path": "macros/calendar_date/month_name.sql", + "unique_id": "macro.dbt_date.postgres__month_name", + "macro_sql": "\n\n{%- macro postgres__month_name(date, short) -%}\n{# FM = Fill mode, which suppresses padding blanks #}\n{%- set f = 'FMMon' if short else 'FMMonth' -%}\n to_char({{ date }}, '{{ f }}')\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.534102, + "supported_languages": null + }, + "macro.dbt_date.last_month_name": { + "name": "last_month_name", + "resource_type": "macro", + "package_name": "dbt_date", + 
"path": "macros/calendar_date/last_month_name.sql", + "original_file_path": "macros/calendar_date/last_month_name.sql", + "unique_id": "macro.dbt_date.last_month_name", + "macro_sql": "{%- macro last_month_name(short=True, tz=None) -%}\n{{ dbt_date.month_name(dbt_date.last_month(tz), short=short) }}\n{%- endmacro -%}", + "depends_on": { + "macros": ["macro.dbt_date.month_name", "macro.dbt_date.last_month"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.534376, + "supported_languages": null + }, + "macro.dbt_date.week_of_year": { + "name": "week_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_of_year.sql", + "original_file_path": "macros/calendar_date/week_of_year.sql", + "unique_id": "macro.dbt_date.week_of_year", + "macro_sql": "{%- macro week_of_year(date=None, tz=None) -%}\n{%-set dt = date if date else dbt_date.today(tz) -%}\n{{ adapter.dispatch('week_of_year', 'dbt_date') (dt) }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": [ + "macro.dbt_date.today", + "macro.dbt_date.default__week_of_year" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5348191, + "supported_languages": null + }, + "macro.dbt_date.default__week_of_year": { + "name": "default__week_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_of_year.sql", + "original_file_path": "macros/calendar_date/week_of_year.sql", + "unique_id": "macro.dbt_date.default__week_of_year", + "macro_sql": "{%- macro default__week_of_year(date) -%}\ncast({{ dbt_date.date_part('week', date) }} as {{ dbt.type_int() }})\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.date_part", "macro.dbt.type_int"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.535025, + "supported_languages": null + }, + "macro.dbt_date.postgres__week_of_year": { + "name": "postgres__week_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_of_year.sql", + "original_file_path": "macros/calendar_date/week_of_year.sql", + "unique_id": "macro.dbt_date.postgres__week_of_year", + "macro_sql": "\n\n{%- macro postgres__week_of_year(date) -%}\n{# postgresql 'week' returns isoweek. 
Use to_char instead.\n WW = the first week starts on the first day of the year #}\ncast(to_char({{ date }}, 'WW') as {{ dbt.type_int() }})\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.type_int"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5351849, + "supported_languages": null + }, + "macro.dbt_date.convert_timezone": { + "name": "convert_timezone", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/convert_timezone.sql", + "original_file_path": "macros/calendar_date/convert_timezone.sql", + "unique_id": "macro.dbt_date.convert_timezone", + "macro_sql": "{%- macro convert_timezone(column, target_tz=None, source_tz=None) -%}\n{%- set source_tz = \"UTC\" if not source_tz else source_tz -%}\n{%- set target_tz = var(\"dbt_date:time_zone\") if not target_tz else target_tz -%}\n{{ adapter.dispatch('convert_timezone', 'dbt_date') (column, target_tz, source_tz) }}\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt_date.default__convert_timezone"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.536145, + "supported_languages": null + }, + "macro.dbt_date.default__convert_timezone": { + "name": "default__convert_timezone", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/convert_timezone.sql", + "original_file_path": "macros/calendar_date/convert_timezone.sql", + "unique_id": "macro.dbt_date.default__convert_timezone", + "macro_sql": "{% macro default__convert_timezone(column, target_tz, source_tz) -%}\nconvert_timezone('{{ source_tz }}', '{{ target_tz }}',\n cast({{ column }} as {{ dbt.type_timestamp() }})\n)\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.type_timestamp"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.53636, + "supported_languages": null + }, + "macro.dbt_date.bigquery__convert_timezone": { + "name": "bigquery__convert_timezone", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/convert_timezone.sql", + "original_file_path": "macros/calendar_date/convert_timezone.sql", + "unique_id": "macro.dbt_date.bigquery__convert_timezone", + "macro_sql": "{%- macro bigquery__convert_timezone(column, target_tz, source_tz=None) -%}\ntimestamp(datetime({{ column }}, '{{ target_tz}}'))\n{%- endmacro -%}\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.536541, + "supported_languages": null + }, + "macro.dbt_date.spark__convert_timezone": { + "name": "spark__convert_timezone", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/convert_timezone.sql", + "original_file_path": "macros/calendar_date/convert_timezone.sql", + "unique_id": "macro.dbt_date.spark__convert_timezone", + "macro_sql": "{%- macro spark__convert_timezone(column, target_tz, source_tz) -%}\nfrom_utc_timestamp(\n to_utc_timestamp({{ column }}, '{{ source_tz }}'),\n '{{ target_tz }}'\n )\n{%- endmacro -%}\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + 
"created_at": 1705588676.536753, + "supported_languages": null + }, + "macro.dbt_date.postgres__convert_timezone": { + "name": "postgres__convert_timezone", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/convert_timezone.sql", + "original_file_path": "macros/calendar_date/convert_timezone.sql", + "unique_id": "macro.dbt_date.postgres__convert_timezone", + "macro_sql": "{% macro postgres__convert_timezone(column, target_tz, source_tz) -%}\ncast(\n cast({{ column }} as {{ dbt.type_timestamp() }})\n at time zone '{{ source_tz }}' at time zone '{{ target_tz }}' as {{ dbt.type_timestamp() }}\n)\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt.type_timestamp"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5370188, + "supported_languages": null + }, + "macro.dbt_date.redshift__convert_timezone": { + "name": "redshift__convert_timezone", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/convert_timezone.sql", + "original_file_path": "macros/calendar_date/convert_timezone.sql", + "unique_id": "macro.dbt_date.redshift__convert_timezone", + "macro_sql": "{%- macro redshift__convert_timezone(column, target_tz, source_tz) -%}\n{{ return(dbt_date.default__convert_timezone(column, target_tz, source_tz)) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_date.default__convert_timezone"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.537227, + "supported_languages": null + }, + "macro.dbt_date.n_months_away": { + "name": "n_months_away", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/n_months_away.sql", + "original_file_path": "macros/calendar_date/n_months_away.sql", + "unique_id": "macro.dbt_date.n_months_away", + "macro_sql": "{%- macro n_months_away(n, tz=None) -%}\n{%- set n = n|int -%}\n{{ dbt.date_trunc('month',\n dbt.dateadd('month', n,\n dbt_date.today(tz)\n )\n ) }}\n{%- endmacro -%}", + "depends_on": { + "macros": [ + "macro.dbt.date_trunc", + "macro.dbt.dateadd", + "macro.dbt_date.today" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5376158, + "supported_languages": null + }, + "macro.dbt_date.iso_week_of_year": { + "name": "iso_week_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_of_year.sql", + "original_file_path": "macros/calendar_date/iso_week_of_year.sql", + "unique_id": "macro.dbt_date.iso_week_of_year", + "macro_sql": "{%- macro iso_week_of_year(date=None, tz=None) -%}\n{%-set dt = date if date else dbt_date.today(tz) -%}\n{{ adapter.dispatch('iso_week_of_year', 'dbt_date') (dt) }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": [ + "macro.dbt_date.today", + "macro.dbt_date.snowflake__iso_week_of_year" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5382981, + "supported_languages": null + }, + "macro.dbt_date._iso_week_of_year": { + "name": "_iso_week_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_of_year.sql", + "original_file_path": 
"macros/calendar_date/iso_week_of_year.sql", + "unique_id": "macro.dbt_date._iso_week_of_year", + "macro_sql": "{%- macro _iso_week_of_year(date, week_type) -%}\ncast({{ dbt_date.date_part(week_type, date) }} as {{ dbt.type_int() }})\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.date_part", "macro.dbt.type_int"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.53851, + "supported_languages": null + }, + "macro.dbt_date.default__iso_week_of_year": { + "name": "default__iso_week_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_of_year.sql", + "original_file_path": "macros/calendar_date/iso_week_of_year.sql", + "unique_id": "macro.dbt_date.default__iso_week_of_year", + "macro_sql": "\n\n{%- macro default__iso_week_of_year(date) -%}\n{{ dbt_date._iso_week_of_year(date, 'isoweek') }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date._iso_week_of_year"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.538659, + "supported_languages": null + }, + "macro.dbt_date.snowflake__iso_week_of_year": { + "name": "snowflake__iso_week_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_of_year.sql", + "original_file_path": "macros/calendar_date/iso_week_of_year.sql", + "unique_id": "macro.dbt_date.snowflake__iso_week_of_year", + "macro_sql": "\n\n{%- macro snowflake__iso_week_of_year(date) -%}\n{{ dbt_date._iso_week_of_year(date, 'weekiso') }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date._iso_week_of_year"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5388072, + "supported_languages": null + }, + "macro.dbt_date.postgres__iso_week_of_year": { + "name": "postgres__iso_week_of_year", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/iso_week_of_year.sql", + "original_file_path": "macros/calendar_date/iso_week_of_year.sql", + "unique_id": "macro.dbt_date.postgres__iso_week_of_year", + "macro_sql": "\n\n{%- macro postgres__iso_week_of_year(date) -%}\n-- postgresql week is isoweek, the first week of a year containing January 4 of that year.\n{{ dbt_date._iso_week_of_year(date, 'week') }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_date._iso_week_of_year"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.53896, + "supported_languages": null + }, + "macro.dbt_date.week_end": { + "name": "week_end", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_end.sql", + "original_file_path": "macros/calendar_date/week_end.sql", + "unique_id": "macro.dbt_date.week_end", + "macro_sql": "{%- macro week_end(date=None, tz=None) -%}\n{%-set dt = date if date else dbt_date.today(tz) -%}\n{{ adapter.dispatch('week_end', 'dbt_date') (dt) }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": ["macro.dbt_date.today", "macro.dbt_date.snowflake__week_end"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5394502, + "supported_languages": null + }, 
+ "macro.dbt_date.default__week_end": { + "name": "default__week_end", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_end.sql", + "original_file_path": "macros/calendar_date/week_end.sql", + "unique_id": "macro.dbt_date.default__week_end", + "macro_sql": "{%- macro default__week_end(date) -%}\n{{ last_day(date, 'week') }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt.last_day"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5396, + "supported_languages": null + }, + "macro.dbt_date.snowflake__week_end": { + "name": "snowflake__week_end", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_end.sql", + "original_file_path": "macros/calendar_date/week_end.sql", + "unique_id": "macro.dbt_date.snowflake__week_end", + "macro_sql": "\n\n{%- macro snowflake__week_end(date) -%}\n{%- set dt = dbt_date.week_start(date) -%}\n{{ dbt_date.n_days_away(6, dt) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.week_start", "macro.dbt_date.n_days_away"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.539812, + "supported_languages": null + }, + "macro.dbt_date.postgres__week_end": { + "name": "postgres__week_end", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/week_end.sql", + "original_file_path": "macros/calendar_date/week_end.sql", + "unique_id": "macro.dbt_date.postgres__week_end", + "macro_sql": "\n\n{%- macro postgres__week_end(date) -%}\n{%- set dt = dbt_date.week_start(date) -%}\n{{ dbt_date.n_days_away(6, dt) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_date.week_start", "macro.dbt_date.n_days_away"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5400178, + "supported_languages": null + }, + "macro.dbt_date.next_month_number": { + "name": "next_month_number", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/next_month_number.sql", + "original_file_path": "macros/calendar_date/next_month_number.sql", + "unique_id": "macro.dbt_date.next_month_number", + "macro_sql": "{%- macro next_month_number(tz=None) -%}\n{{ dbt_date.date_part('month', dbt_date.next_month(tz)) }}\n{%- endmacro -%}", + "depends_on": { + "macros": ["macro.dbt_date.date_part", "macro.dbt_date.next_month"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.540262, + "supported_languages": null + }, + "macro.dbt_date.last_month_number": { + "name": "last_month_number", + "resource_type": "macro", + "package_name": "dbt_date", + "path": "macros/calendar_date/last_month_number.sql", + "original_file_path": "macros/calendar_date/last_month_number.sql", + "unique_id": "macro.dbt_date.last_month_number", + "macro_sql": "{%- macro last_month_number(tz=None) -%}\n{{ dbt_date.date_part('month', dbt_date.last_month(tz)) }}\n{%- endmacro -%}", + "depends_on": { + "macros": ["macro.dbt_date.date_part", "macro.dbt_date.last_month"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5404868, + 
"supported_languages": null + }, + "macro.dbt_external_tables.bigquery__create_external_table": { + "name": "bigquery__create_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/bigquery/create_external_table.sql", + "original_file_path": "macros/plugins/bigquery/create_external_table.sql", + "unique_id": "macro.dbt_external_tables.bigquery__create_external_table", + "macro_sql": "{% macro bigquery__create_external_table(source_node) %}\n\n {%- set columns = source_node.columns.values() -%}\n {%- set external = source_node.external -%}\n {%- set partitions = external.partitions -%}\n {%- set options = external.options -%}\n\n {% if options is mapping and options.get('connection_name', none) %}\n {% set connection_name = options.pop('connection_name') %}\n {% endif %}\n \n {%- set uris = [] -%}\n {%- if options is mapping and options.get('uris', none) -%}\n {%- set uris = external.options.get('uris') -%}\n {%- else -%}\n {%- set uris = [external.location] -%}\n {%- endif -%}\n\n create or replace external table {{source(source_node.source_name, source_node.name)}}\n {%- if columns -%}(\n {% for column in columns %}\n {%- set column_quoted = adapter.quote(column.name) if column.quote else column.name %}\n {{column_quoted}} {{column.data_type}} {{- ',' if not loop.last -}}\n {%- endfor -%}\n )\n {% endif %}\n {% if options and options.get('hive_partition_uri_prefix', none) %}\n with partition columns {%- if partitions %} (\n {%- for partition in partitions %}\n {{partition.name}} {{partition.data_type}}{{',' if not loop.last}}\n {%- endfor -%}\n ) {% endif -%}\n {% endif %}\n {% if connection_name %}\n with connection `{{ connection_name }}`\n {% endif %}\n options (\n uris = [{%- for uri in uris -%} '{{uri}}' {{- \",\" if not loop.last}} {%- endfor -%}]\n {%- if options is mapping -%}\n {%- for key, value in options.items() if key != 'uris' %}\n {%- if value is string -%}\n , {{key}} = '{{value}}'\n {%- else -%}\n , {{key}} = {{value}}\n {%- endif -%}\n {%- endfor -%}\n {%- endif -%}\n )\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5443351, + "supported_languages": null + }, + "macro.dbt_external_tables.bigquery__create_external_schema": { + "name": "bigquery__create_external_schema", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/bigquery/create_external_schema.sql", + "original_file_path": "macros/plugins/bigquery/create_external_schema.sql", + "unique_id": "macro.dbt_external_tables.bigquery__create_external_schema", + "macro_sql": "{%- macro bigquery__create_external_schema(source_node) -%}\n {%- set fqn -%}\n {%- if source_node.database -%}\n `{{ source_node.database }}`.{{ source_node.schema }}\n {%- else -%}\n {{ source_node.schema }}\n {%- endif -%}\n {%- endset -%}\n\n {% set schema_exists_query %}\n select * from {{ source_node.database }}.INFORMATION_SCHEMA.SCHEMATA where schema_name = '{{ source_node.schema }}' limit 1\n {% endset %}\n {% if execute %}\n {% set schema_exists = run_query(schema_exists_query)|length > 0 %}\n {% else %}\n {% set schema_exists = false %}\n {% endif %} \n\n {%- if not schema_exists -%}\n {%- set ddl -%}\n create schema if not exists {{ fqn }}\n {%- endset -%}\n {{ return(ddl) }}\n {%- else -%}\n {{ return('') }}\n {% endif %} \n{%- endmacro -%}", + "depends_on": { "macros": 
["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.545729, + "supported_languages": null + }, + "macro.dbt_external_tables.bigquery__get_external_build_plan": { + "name": "bigquery__get_external_build_plan", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/bigquery/get_external_build_plan.sql", + "original_file_path": "macros/plugins/bigquery/get_external_build_plan.sql", + "unique_id": "macro.dbt_external_tables.bigquery__get_external_build_plan", + "macro_sql": "{% macro bigquery__get_external_build_plan(source_node) %}\n\n {% set build_plan = [] %}\n \n {% set old_relation = adapter.get_relation(\n database = source_node.database,\n schema = source_node.schema,\n identifier = source_node.identifier\n ) %}\n \n {% set create_or_replace = (old_relation is none or var('ext_full_refresh', false)) %}\n\n {% if create_or_replace %}\n {% if not dbt_external_tables.create_external_schema(source_node)|length %}\n {% set build_plan = build_plan + [\n dbt_external_tables.create_external_table(source_node)\n ] %}\n {% else %}\n {% set build_plan = build_plan + [\n dbt_external_tables.create_external_schema(source_node),\n dbt_external_tables.create_external_table(source_node)\n ] %}\n {% endif %}\n {% else %}\n {% set build_plan = build_plan + dbt_external_tables.refresh_external_table(source_node) %}\n {% endif %}\n\n {% do return(build_plan) %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_external_tables.create_external_schema", + "macro.dbt_external_tables.create_external_table", + "macro.dbt_external_tables.refresh_external_table" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.547023, + "supported_languages": null + }, + "macro.dbt_external_tables.snowflake__create_external_table": { + "name": "snowflake__create_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/snowflake/create_external_table.sql", + "original_file_path": "macros/plugins/snowflake/create_external_table.sql", + "unique_id": "macro.dbt_external_tables.snowflake__create_external_table", + "macro_sql": "{% macro snowflake__create_external_table(source_node) %}\n\n {%- set columns = source_node.columns.values() -%}\n {%- set external = source_node.external -%}\n {%- set partitions = external.partitions -%}\n {%- set infer_schema = external.infer_schema -%}\n\n {% if infer_schema %}\n {% set query_infer_schema %}\n select * from table( infer_schema( location=>'{{external.location}}', file_format=>'{{external.file_format}}') )\n {% endset %}\n {% if execute %}\n {% set columns_infer = run_query(query_infer_schema) %}\n {% endif %}\n {% endif %}\n\n {%- set is_csv = dbt_external_tables.is_csv(external.file_format) -%}\n\n{# https://docs.snowflake.net/manuals/sql-reference/sql/create-external-table.html #}\n{# This assumes you have already created an external stage #}\n create or replace external table {{source(source_node.source_name, source_node.name)}}\n {%- if columns or partitions or infer_schema -%}\n (\n {%- if partitions -%}{%- for partition in partitions %}\n {{partition.name}} {{partition.data_type}} as {{partition.expression}}{{- ',' if not loop.last or columns|length > 0 or infer_schema -}}\n {%- endfor -%}{%- endif -%}\n {%- if not infer_schema -%}\n {%- for 
column in columns %}\n {%- set column_quoted = adapter.quote(column.name) if column.quote else column.name %}\n {%- set col_expression -%}\n {%- set col_id = 'value:c' ~ loop.index if is_csv else 'value:' ~ column_quoted -%}\n (case when is_null_value({{col_id}}) or lower({{col_id}}) = 'null' then null else {{col_id}} end)\n {%- endset %}\n {{column_quoted}} {{column.data_type}} as ({{col_expression}}::{{column.data_type}})\n {{- ',' if not loop.last -}}\n {% endfor %}\n {% else %}\n {%- for column in columns_infer %}\n {%- set col_expression -%}\n {%- set col_id = 'value:' ~ column[0] -%}\n (case when is_null_value({{col_id}}) or lower({{col_id}}) = 'null' then null else {{col_id}} end)\n {%- endset %}\n {{column[0]}} {{column[1]}} as ({{col_expression}}::{{column[1]}})\n {{- ',' if not loop.last -}}\n {% endfor %}\n {%- endif -%}\n )\n {%- endif -%}\n {% if partitions %} partition by ({{partitions|map(attribute='name')|join(', ')}}) {% endif %}\n location = {{external.location}} {# stage #}\n {% if external.auto_refresh in (true, false) -%}\n auto_refresh = {{external.auto_refresh}}\n {%- endif %}\n {% if external.pattern -%} pattern = '{{external.pattern}}' {%- endif %}\n {% if external.integration -%} integration = '{{external.integration}}' {%- endif %}\n file_format = {{external.file_format}}\n {% if external.table_format -%} table_format = '{{external.table_format}}' {%- endif %}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.run_query", "macro.dbt_external_tables.is_csv"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.551475, + "supported_languages": null + }, + "macro.dbt_external_tables.snowflake__refresh_external_table": { + "name": "snowflake__refresh_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/snowflake/refresh_external_table.sql", + "original_file_path": "macros/plugins/snowflake/refresh_external_table.sql", + "unique_id": "macro.dbt_external_tables.snowflake__refresh_external_table", + "macro_sql": "{% macro snowflake__refresh_external_table(source_node) %}\n\n {% set external = source_node.external %}\n {% set snowpipe = source_node.external.get('snowpipe', none) %}\n \n {% set auto_refresh = external.get('auto_refresh', false) %}\n {% set partitions = external.get('partitions', none) %}\n \n {% set manual_refresh = (partitions and not auto_refresh) %}\n \n {% if manual_refresh %}\n\n {% set ddl %}\n begin;\n alter external table {{source(source_node.source_name, source_node.name)}} refresh;\n commit;\n {% endset %}\n \n {% do return([ddl]) %}\n \n {% else %}\n \n {% do return([]) %}\n \n {% endif %}\n \n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.552518, + "supported_languages": null + }, + "macro.dbt_external_tables.snowflake__create_external_schema": { + "name": "snowflake__create_external_schema", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/snowflake/create_external_schema.sql", + "original_file_path": "macros/plugins/snowflake/create_external_schema.sql", + "unique_id": "macro.dbt_external_tables.snowflake__create_external_schema", + "macro_sql": "{% macro snowflake__create_external_schema(source_node) %}\n\n {% set schema_exists_query %}\n show terse schemas like '{{ 
source_node.schema }}' in database {{ source_node.database }} limit 1;\n {% endset %}\n {% if execute %}\n {% set schema_exists = run_query(schema_exists_query)|length > 0 %}\n {% else %}\n {% set schema_exists = false %}\n {% endif %} \n\n {% if schema_exists %}\n {% set ddl %}\n select 'Schema {{ source_node.schema }} exists' from dual;\n {% endset %}\n {% else %}\n {% set fqn %}\n {% if source_node.database %}\n {{ source_node.database }}.{{ source_node.schema }}\n {% else %}\n {{ source_node.schema }}\n {% endif %}\n {% endset %}\n\n {% set ddl %}\n create schema if not exists {{ fqn }};\n {% endset %}\n {% endif %}\n\n {% do return(ddl) %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.run_query"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.553951, + "supported_languages": null + }, + "macro.dbt_external_tables.snowflake__get_external_build_plan": { + "name": "snowflake__get_external_build_plan", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/snowflake/get_external_build_plan.sql", + "original_file_path": "macros/plugins/snowflake/get_external_build_plan.sql", + "unique_id": "macro.dbt_external_tables.snowflake__get_external_build_plan", + "macro_sql": "{% macro snowflake__get_external_build_plan(source_node) %}\n\n {% set build_plan = [] %}\n \n {% set old_relation = adapter.get_relation(\n database = source_node.database,\n schema = source_node.schema,\n identifier = source_node.identifier\n ) %}\n \n {% set create_or_replace = (old_relation is none or var('ext_full_refresh', false)) %}\n\n {% if source_node.external.get('snowpipe', none) is not none %}\n \n {% if create_or_replace %}\n {% set build_plan = build_plan + [\n dbt_external_tables.create_external_schema(source_node),\n dbt_external_tables.snowflake_create_empty_table(source_node),\n dbt_external_tables.snowflake_get_copy_sql(source_node, explicit_transaction=true),\n dbt_external_tables.snowflake_create_snowpipe(source_node)\n ] %}\n {% else %}\n {% set build_plan = build_plan + dbt_external_tables.snowflake_refresh_snowpipe(source_node) %}\n {% endif %}\n \n {% else %}\n \n {% if create_or_replace %}\n {% set build_plan = build_plan + [\n dbt_external_tables.create_external_schema(source_node),\n dbt_external_tables.create_external_table(source_node)\n ] %}\n {% else %}\n {% set build_plan = build_plan + dbt_external_tables.refresh_external_table(source_node) %}\n {% endif %}\n \n {% endif %}\n\n {% do return(build_plan) %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_external_tables.create_external_schema", + "macro.dbt_external_tables.snowflake_create_empty_table", + "macro.dbt_external_tables.snowflake_get_copy_sql", + "macro.dbt_external_tables.snowflake_create_snowpipe", + "macro.dbt_external_tables.snowflake_refresh_snowpipe", + "macro.dbt_external_tables.create_external_table", + "macro.dbt_external_tables.refresh_external_table" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.556003, + "supported_languages": null + }, + "macro.dbt_external_tables.snowflake_create_snowpipe": { + "name": "snowflake_create_snowpipe", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/snowflake/snowpipe/create_snowpipe.sql", + "original_file_path": 
"macros/plugins/snowflake/snowpipe/create_snowpipe.sql", + "unique_id": "macro.dbt_external_tables.snowflake_create_snowpipe", + "macro_sql": "{% macro snowflake_create_snowpipe(source_node) %}\n\n {%- set external = source_node.external -%}\n {%- set snowpipe = external.snowpipe -%}\n\n{# https://docs.snowflake.com/en/sql-reference/sql/create-pipe.html #}\n create or replace pipe {{source(source_node.source_name, source_node.name)}}\n {% if snowpipe.auto_ingest -%} auto_ingest = {{snowpipe.auto_ingest}} {%- endif %}\n {% if snowpipe.aws_sns_topic -%} aws_sns_topic = '{{snowpipe.aws_sns_topic}}' {%- endif %}\n {% if snowpipe.integration -%} integration = '{{snowpipe.integration}}' {%- endif %}\n {% if snowpipe.error_integration -%} error_integration = '{{snowpipe.error_integration}}' {%- endif %}\n as {{ dbt_external_tables.snowflake_get_copy_sql(source_node) }}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_external_tables.snowflake_get_copy_sql"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.557072, + "supported_languages": null + }, + "macro.dbt_external_tables.snowflake_create_empty_table": { + "name": "snowflake_create_empty_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/snowflake/snowpipe/create_empty_table.sql", + "original_file_path": "macros/plugins/snowflake/snowpipe/create_empty_table.sql", + "unique_id": "macro.dbt_external_tables.snowflake_create_empty_table", + "macro_sql": "{% macro snowflake_create_empty_table(source_node) %}\n\n {%- set columns = source_node.columns.values() %}\n\n create or replace table {{source(source_node.source_name, source_node.name)}} (\n {% if columns|length == 0 %}\n value variant,\n {% else -%}\n {%- for column in columns -%}\n {{column.name}} {{column.data_type}},\n {% endfor -%}\n {% endif %}\n metadata_filename varchar,\n metadata_file_row_number bigint,\n _dbt_copied_at timestamp\n );\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.557803, + "supported_languages": null + }, + "macro.dbt_external_tables.snowflake_refresh_snowpipe": { + "name": "snowflake_refresh_snowpipe", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/snowflake/snowpipe/refresh_snowpipe.sql", + "original_file_path": "macros/plugins/snowflake/snowpipe/refresh_snowpipe.sql", + "unique_id": "macro.dbt_external_tables.snowflake_refresh_snowpipe", + "macro_sql": "{% macro snowflake_refresh_snowpipe(source_node) %}\n\n {% set snowpipe = source_node.external.snowpipe %}\n {% set auto_ingest = snowpipe.get('auto_ingest', false) if snowpipe is mapping %}\n \n {% if auto_ingest is true %}\n \n {% do return([]) %}\n \n {% else %}\n \n {% set ddl %}\n alter pipe {{source(source_node.source_name, source_node.name)}} refresh\n {% endset %}\n \n {{ return([ddl]) }}\n \n {% endif %}\n \n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.558753, + "supported_languages": null + }, + "macro.dbt_external_tables.snowflake_get_copy_sql": { + "name": "snowflake_get_copy_sql", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": 
"macros/plugins/snowflake/snowpipe/get_copy_sql.sql", + "original_file_path": "macros/plugins/snowflake/snowpipe/get_copy_sql.sql", + "unique_id": "macro.dbt_external_tables.snowflake_get_copy_sql", + "macro_sql": "{% macro snowflake_get_copy_sql(source_node, explicit_transaction=false) %}\n{# This assumes you have already created an external stage #}\n\n {%- set columns = source_node.columns.values() -%}\n {%- set external = source_node.external -%}\n {%- set is_csv = dbt_external_tables.is_csv(external.file_format) %}\n {%- set copy_options = external.snowpipe.get('copy_options', none) -%}\n \n {%- if explicit_transaction -%} begin; {%- endif %}\n \n copy into {{source(source_node.source_name, source_node.name)}}\n from ( \n select\n {% if columns|length == 0 %}\n $1::variant as value,\n {% else -%}\n {%- for column in columns -%}\n {%- set col_expression -%}\n {%- if is_csv -%}nullif(${{loop.index}},''){# special case: get columns by ordinal position #}\n {%- else -%}nullif($1:{{column.name}},''){# standard behavior: get columns by name #}\n {%- endif -%}\n {%- endset -%}\n {{col_expression}}::{{column.data_type}} as {{column.name}},\n {% endfor -%}\n {% endif %}\n metadata$filename::varchar as metadata_filename,\n metadata$file_row_number::bigint as metadata_file_row_number,\n current_timestamp::timestamp as _dbt_copied_at\n from {{external.location}} {# stage #}\n )\n file_format = {{external.file_format}}\n {% if external.pattern -%} pattern = '{{external.pattern}}' {%- endif %}\n {% if copy_options %} {{copy_options}} {% endif %};\n \n {% if explicit_transaction -%} commit; {%- endif -%}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_external_tables.is_csv"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.560464, + "supported_languages": null + }, + "macro.dbt_external_tables.is_csv": { + "name": "is_csv", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/snowflake/helpers/is_csv.sql", + "original_file_path": "macros/plugins/snowflake/helpers/is_csv.sql", + "unique_id": "macro.dbt_external_tables.is_csv", + "macro_sql": "{% macro is_csv(file_format) %}\n\n{# From https://docs.snowflake.net/manuals/sql-reference/sql/create-external-table.html:\n\nImportant: The external table does not inherit the file format, if any, in the \nstage definition. 
You must explicitly specify any file format options for the \nexternal table using the FILE_FORMAT parameter.\n\nNote: FORMAT_NAME and TYPE are mutually exclusive; to avoid unintended behavior, \nyou should only specify one or the other when creating an external table.\n\n#}\n\n {% set ff_ltrimmed = file_format|lower|replace(' ','') %}\n\n {% if 'type=' in ff_ltrimmed %}\n \n {% if 'type=csv' in ff_ltrimmed %}\n\n {{return(true)}}\n\n {% else %}\n\n {{return(false)}}\n \n {% endif %}\n \n {% else %}\n \n {% set ff_standardized = ff_ltrimmed\n | replace('(','') | replace(')','')\n | replace('format_name=','') %}\n {% set fqn = ff_standardized.split('.') %}\n \n {% if fqn | length == 3 %}\n {% set ff_database, ff_schema, ff_identifier = fqn[0], fqn[1], fqn[2] %}\n {% elif fqn | length == 2 %}\n {% set ff_database, ff_schema, ff_identifier = target.database, fqn[0], fqn[1] %}\n {% else %}\n {% set ff_database, ff_schema, ff_identifier = target.database, target.schema, fqn[0] %}\n {% endif %}\n \n {% call statement('get_file_format', fetch_result = True) %}\n show file formats in {{ff_database}}.{{ff_schema}}\n {% endcall %}\n \n {% set ffs = load_result('get_file_format').table %}\n \n {% for ff in ffs %}\n \n {% if ff['name']|lower == ff_identifier and ff['type']|lower == 'csv' %}\n \n {{return(true)}}\n \n {% endif %}\n \n {% endfor %}\n \n {{return(false)}} \n \n {% endif %}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.statement"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.56334, + "supported_languages": null + }, + "macro.dbt_external_tables.redshift__create_external_table": { + "name": "redshift__create_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/create_external_table.sql", + "original_file_path": "macros/plugins/redshift/create_external_table.sql", + "unique_id": "macro.dbt_external_tables.redshift__create_external_table", + "macro_sql": "{% macro redshift__create_external_table(source_node) %}\n\n {%- set columns = source_node.columns.values() -%}\n {%- set external = source_node.external -%}\n {%- set partitions = external.partitions -%}\n\n{# https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_EXTERNAL_TABLE.html #}\n{# This assumes you have already created an external schema #}\n\n create external table {{source(source_node.source_name, source_node.name)}} (\n {% for column in columns %}\n {{adapter.quote(column.name)}} {{column.data_type}}\n {{- ',' if not loop.last -}}\n {% endfor %}\n )\n {% if partitions -%} partitioned by (\n {%- for partition in partitions -%}\n {{adapter.quote(partition.name)}} {{partition.data_type}}{{', ' if not loop.last}}\n {%- endfor -%}\n ) {%- endif %}\n {% if external.row_format -%} row format {{external.row_format}} {%- endif %}\n {% if external.file_format -%} stored as {{external.file_format}} {%- endif %}\n {% if external.location -%} location '{{external.location}}' {%- endif %}\n {% if external.table_properties -%} table properties {{external.table_properties}} {%- endif %}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.564842, + "supported_languages": null + }, + "macro.dbt_external_tables.redshift__refresh_external_table": { + "name": "redshift__refresh_external_table", + 
"resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/refresh_external_table.sql", + "original_file_path": "macros/plugins/redshift/refresh_external_table.sql", + "unique_id": "macro.dbt_external_tables.redshift__refresh_external_table", + "macro_sql": "{% macro redshift__refresh_external_table(source_node) %}\n\n {%- set partitions = source_node.external.get('partitions',[]) -%}\n\n {%- if partitions -%}\n \n {%- set part_len = partitions|length -%}\n \n {%- set get_partitions_sql -%}\n \n select * from\n \n {%- for partition in partitions %} (\n \n {%- set part_num = loop.index -%}\n \n {%- if partition.vals.macro -%}\n {%- set vals = dbt_external_tables.render_from_context(partition.vals.macro, **partition.vals.args) -%}\n {%- elif partition.vals is string -%}\n {%- set vals = [partition.vals] -%}\n {%- else -%}\n {%- set vals = partition.vals -%}\n {%- endif -%}\n \n {%- for val in vals %}\n \n select\n '\"{{ partition.name }}\"' as name_{{ part_num }},\n '\"{{ val }}\"' as val_{{ part_num }},\n '\"{{ dbt_external_tables.render_from_context(partition.path_macro, partition.name, val) }}\"' as path_{{ part_num }}\n \n {{ 'union all' if not loop.last else ') ' }}\n \n {%- endfor -%}\n \n {{ 'cross join' if not loop.last }}\n \n {%- endfor -%}\n \n {%- endset -%}\n \n {%- set finals = [] -%}\n \n {%- if execute -%}\n {%- set results = run_query(get_partitions_sql) -%}\n {%- for row in results -%}\n \n {%- set partition_parts = [] -%}\n {%- set path_parts = [] -%}\n \n {%- for i in range(0, part_len) -%}\n {%- do partition_parts.append({\n 'name': row[i * 3][1:-1],\n 'value': row[i * 3 + 1][1:-1]\n }) -%}\n {%- do path_parts.append(row[i * 3 + 2][1:-1]) -%}\n {%- endfor -%}\n \n {%- set construct = {\n 'partition_by': partition_parts,\n 'path': path_parts | join('/')\n } -%}\n \n {% do finals.append(construct) %}\n {%- endfor -%}\n {%- endif -%}\n \n {%- set ddl = dbt_external_tables.redshift_alter_table_add_partitions(source_node, finals) -%}\n {{ return(ddl) }}\n \n {% else %}\n \n {% do return([]) %}\n \n {% endif %}\n \n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_external_tables.render_from_context", + "macro.dbt.run_query", + "macro.dbt_external_tables.redshift_alter_table_add_partitions" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.570732, + "supported_languages": null + }, + "macro.dbt_external_tables.redshift__get_external_build_plan": { + "name": "redshift__get_external_build_plan", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/get_external_build_plan.sql", + "original_file_path": "macros/plugins/redshift/get_external_build_plan.sql", + "unique_id": "macro.dbt_external_tables.redshift__get_external_build_plan", + "macro_sql": "{% macro redshift__get_external_build_plan(source_node) %}\n\n {% set build_plan = [] %}\n \n {% set create_or_replace = (var('ext_full_refresh', false) or not dbt_external_tables.redshift_is_ext_tbl(source_node)) %}\n \n {% if create_or_replace %}\n\n {% set build_plan = [\n dbt_external_tables.dropif(source_node),\n dbt_external_tables.create_external_table(source_node)\n ] + dbt_external_tables.refresh_external_table(source_node) \n %}\n \n {% else %}\n \n {% set build_plan = dbt_external_tables.refresh_external_table(source_node) %}\n \n {% endif %}\n \n {% do return(build_plan) %}\n\n{% endmacro %}", + "depends_on": { + 
"macros": [ + "macro.dbt_external_tables.redshift_is_ext_tbl", + "macro.dbt_external_tables.dropif", + "macro.dbt_external_tables.create_external_table", + "macro.dbt_external_tables.refresh_external_table" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.571637, + "supported_languages": null + }, + "macro.dbt_external_tables.render_from_context": { + "name": "render_from_context", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/helpers/render_macro.sql", + "original_file_path": "macros/plugins/redshift/helpers/render_macro.sql", + "unique_id": "macro.dbt_external_tables.render_from_context", + "macro_sql": "{% macro render_from_context(name) -%}\n{% set original_name = name %}\n {% if '.' in name %}\n {% set package_name, name = name.split(\".\", 1) %}\n {% else %}\n {% set package_name = none %}\n {% endif %}\n\n {% if package_name is none %}\n {% set package_context = context %}\n {% elif package_name in context %}\n {% set package_context = context[package_name] %}\n {% else %}\n {% set error_msg %}\n Could not find package '{{package_name}}', called by macro '{{original_name}}'\n {% endset %}\n {{ exceptions.raise_compiler_error(error_msg | trim) }}\n {% endif %}\n \n {{ return(package_context[name](*varargs, **kwargs)) }}\n\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.572821, + "supported_languages": null + }, + "macro.dbt_external_tables.redshift__dropif": { + "name": "redshift__dropif", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/helpers/dropif.sql", + "original_file_path": "macros/plugins/redshift/helpers/dropif.sql", + "unique_id": "macro.dbt_external_tables.redshift__dropif", + "macro_sql": "{% macro redshift__dropif(node) %}\n \n {% set ddl %}\n drop table if exists {{source(node.source_name, node.name)}} cascade\n {% endset %}\n \n {{return(ddl)}}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5731819, + "supported_languages": null + }, + "macro.dbt_external_tables.redshift_alter_table_add_partitions": { + "name": "redshift_alter_table_add_partitions", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/helpers/add_partitions.sql", + "original_file_path": "macros/plugins/redshift/helpers/add_partitions.sql", + "unique_id": "macro.dbt_external_tables.redshift_alter_table_add_partitions", + "macro_sql": "{% macro redshift_alter_table_add_partitions(source_node, partitions) %}\n\n {{ log(\"Generating ADD PARTITION statement for partition set between \" \n ~ partitions[0]['path'] ~ \" and \" ~ (partitions|last)['path']) }}\n\n {% set ddl = [] %}\n \n {% if partitions|length > 0 %}\n \n {% set alter_table_add %}\n alter table {{source(source_node.source_name, source_node.name)}} add if not exists \n {% endset %}\n \n {%- set alters -%}\n\n {{ alter_table_add }}\n\n {%- for partition in partitions -%}\n\n {%- if loop.index0 != 0 and loop.index0 % 100 == 0 -%}\n\n ; {{ alter_table_add }}\n\n {%- endif -%}\n\n partition ({%- for part in partition.partition_by -%}{{ part.name }}='{{ part.value }}'{{', 
' if not loop.last}}{%- endfor -%})\n location '{{ source_node.external.location }}/{{ partition.path }}/'\n\n {% endfor -%}\n \n {%- endset -%}\n \n {% set ddl = ddl + alters.split(';') %}\n\n {% else %}\n\n {{ log(\"No partitions to be added\") }}\n\n {% endif %}\n \n {% do return(ddl) %}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.575136, + "supported_languages": null + }, + "macro.dbt_external_tables.redshift__exit_transaction": { + "name": "redshift__exit_transaction", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/helpers/transaction.sql", + "original_file_path": "macros/plugins/redshift/helpers/transaction.sql", + "unique_id": "macro.dbt_external_tables.redshift__exit_transaction", + "macro_sql": "{% macro redshift__exit_transaction() %}\n {{ return('begin; commit;') }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.575337, + "supported_languages": null + }, + "macro.dbt_external_tables.year_month_day": { + "name": "year_month_day", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/helpers/paths.sql", + "original_file_path": "macros/plugins/redshift/helpers/paths.sql", + "unique_id": "macro.dbt_external_tables.year_month_day", + "macro_sql": "{% macro year_month_day(name, value) %}\n {% set path = value.replace('-','/') %}\n {{return(path)}}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt.replace"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5756958, + "supported_languages": null + }, + "macro.dbt_external_tables.key_value": { + "name": "key_value", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/helpers/paths.sql", + "original_file_path": "macros/plugins/redshift/helpers/paths.sql", + "unique_id": "macro.dbt_external_tables.key_value", + "macro_sql": "{% macro key_value(name, value) %}\n {% set path = name ~ '=' ~ value %}\n {{return(path)}}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.575902, + "supported_languages": null + }, + "macro.dbt_external_tables.value_only": { + "name": "value_only", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/helpers/paths.sql", + "original_file_path": "macros/plugins/redshift/helpers/paths.sql", + "unique_id": "macro.dbt_external_tables.value_only", + "macro_sql": "{% macro value_only(name, value) %}\n {% set path = value %}\n {{return(path)}}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5760791, + "supported_languages": null + }, + "macro.dbt_external_tables.redshift_is_ext_tbl": { + "name": "redshift_is_ext_tbl", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/redshift/helpers/is_ext_tbl.sql", + "original_file_path": 
"macros/plugins/redshift/helpers/is_ext_tbl.sql", + "unique_id": "macro.dbt_external_tables.redshift_is_ext_tbl", + "macro_sql": "{% macro redshift_is_ext_tbl(node) %}\n\n {% set existing_relation = load_relation(node) %}\n \n {# external tables don't appear in information_schema.tables,\n so dbt doesn't cache them #}\n {% if existing_relation is none %}\n\n {% set find_ext_tbl %}\n \n select count(*) from svv_external_tables\n where schemaname = '{{node.schema}}'\n and tablename = '{{node.identifier}}'\n \n {% endset %}\n \n {% if execute %}\n {% set result = run_query(find_ext_tbl)[0][0] %}\n {% else %}\n {% set result = 0 %}\n {% endif %}\n\n {% set is_ext_tbl = (result > 0) %}\n {% do return(is_ext_tbl) %}\n \n {% else %}\n \n {% do return(false) %}\n \n {% endif %}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt.load_relation", "macro.dbt.run_query"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5771148, + "supported_languages": null + }, + "macro.dbt_external_tables.sqlserver__create_external_table": { + "name": "sqlserver__create_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/sqlserver/create_external_table.sql", + "original_file_path": "macros/plugins/sqlserver/create_external_table.sql", + "unique_id": "macro.dbt_external_tables.sqlserver__create_external_table", + "macro_sql": "{% macro sqlserver__create_external_table(source_node) %}\n\n {%- set columns = source_node.columns.values() -%}\n {%- set external = source_node.external -%}\n\n {% if external.ansi_nulls is true -%} SET ANSI_NULLS ON; {%- endif %}\n {% if external.quoted_identifier is true -%} SET QUOTED_IDENTIFIER ON; {%- endif %}\n\n create external table {{source(source_node.source_name, source_node.name)}} (\n {% for column in columns %}\n {# TODO set nullity based on schema tests?? #}\n {%- set nullity = 'NOT NULL' if 'not_null' in columns.tests else 'NULL'-%}\n {{adapter.quote(column.name)}} {{column.data_type}} {{nullity}}\n {{- ',' if not loop.last -}}\n {% endfor %}\n )\n WITH (\n {# remove keys that are None (i.e. 
not defined for a given source) #}\n {%- for key, value in external.items() if value is not none and key not in ['ansi_nulls', 'quoted_identifier'] -%}\n {{key}} = \n {%- if key in [\"location\", \"schema_name\", \"object_name\"] -%}\n '{{value}}'\n {% elif key in [\"data_source\",\"file_format\"] -%}\n [{{value}}]\n {% else -%}\n {{value}}\n {%- endif -%}\n {{- ',' if not loop.last -}}\n {%- endfor -%}\n )\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5787652, + "supported_languages": null + }, + "macro.dbt_external_tables.sqlserver__create_external_schema": { + "name": "sqlserver__create_external_schema", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/sqlserver/create_external_schema.sql", + "original_file_path": "macros/plugins/sqlserver/create_external_schema.sql", + "unique_id": "macro.dbt_external_tables.sqlserver__create_external_schema", + "macro_sql": "{% macro sqlserver__create_external_schema(source_node) %}\n {# https://learn.microsoft.com/en-us/sql/t-sql/statements/create-schema-transact-sql?view=sql-server-ver16 #}\n\n {% set ddl %}\n IF NOT EXISTS (SELECT * FROM sys.schemas WHERE name = '{{ source_node.schema }}')\n BEGIN\n EXEC('CREATE SCHEMA [{{ source_node.schema }}]')\n END \n {% endset %}\n\n {{return(ddl)}}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.57915, + "supported_languages": null + }, + "macro.dbt_external_tables.sqlserver__get_external_build_plan": { + "name": "sqlserver__get_external_build_plan", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/sqlserver/get_external_build_plan.sql", + "original_file_path": "macros/plugins/sqlserver/get_external_build_plan.sql", + "unique_id": "macro.dbt_external_tables.sqlserver__get_external_build_plan", + "macro_sql": "{% macro sqlserver__get_external_build_plan(source_node) %}\n\n {% set build_plan = [] %}\n\n {% set old_relation = adapter.get_relation(\n database = source_node.database,\n schema = source_node.schema,\n identifier = source_node.identifier\n ) %}\n\n {% set create_or_replace = (old_relation is none or var('ext_full_refresh', false)) %}\n\n {% if create_or_replace %}\n {% set build_plan = build_plan + [ \n dbt_external_tables.create_external_schema(source_node),\n dbt_external_tables.dropif(source_node), \n dbt_external_tables.create_external_table(source_node)\n ] %}\n {% else %}\n {% set build_plan = build_plan + dbt_external_tables.refresh_external_table(source_node) %}\n {% endif %}\n {% do return(build_plan) %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_external_tables.create_external_schema", + "macro.dbt_external_tables.dropif", + "macro.dbt_external_tables.create_external_table", + "macro.dbt_external_tables.refresh_external_table" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.580123, + "supported_languages": null + }, + "macro.dbt_external_tables.sqlserver__dropif": { + "name": "sqlserver__dropif", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/sqlserver/helpers/dropif.sql", + "original_file_path": 
"macros/plugins/sqlserver/helpers/dropif.sql", + "unique_id": "macro.dbt_external_tables.sqlserver__dropif", + "macro_sql": "{% macro sqlserver__dropif(node) %}\n \n {% set ddl %}\n if object_id ('{{source(node.source_name, node.name)}}') is not null\n begin\n drop external table {{source(node.source_name, node.name)}}\n end\n {% endset %}\n \n {{return(ddl)}}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.58054, + "supported_languages": null + }, + "macro.dbt_external_tables.spark__create_external_table": { + "name": "spark__create_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/spark/create_external_table.sql", + "original_file_path": "macros/plugins/spark/create_external_table.sql", + "unique_id": "macro.dbt_external_tables.spark__create_external_table", + "macro_sql": "{% macro spark__create_external_table(source_node) %}\n\n {%- set columns = source_node.columns.values() -%}\n {%- set external = source_node.external -%}\n {%- set partitions = external.partitions -%}\n {%- set options = external.options -%}\n\n {%- set columns_and_partitions = columns | list -%}\n {%- if partitions -%}\n {%- for i in partitions -%}\n {%- if i.name not in columns_and_partitions | list | map(attribute='name') -%}\n {%- do columns_and_partitions.append(i) -%}\n {%- endif -%}\n {%- endfor -%}\n {%- endif -%}\n\n{# https://spark.apache.org/docs/latest/sql-data-sources-hive-tables.html #}\n create table {{source(source_node.source_name, source_node.name)}} \n {%- if columns | length > 0 %} (\n {% for column in columns_and_partitions %}\n {{column.name}} {{column.data_type}}\n {{- ',' if not loop.last -}}\n {% endfor %}\n ) {% endif -%}\n {% if external.using %} using {{external.using}} {%- endif %}\n {% if options -%} options (\n {%- for key, value in options.items() -%}\n '{{ key }}' = '{{value}}' {{- ', \\n' if not loop.last -}}\n {%- endfor -%}\n ) {%- endif %}\n {% if partitions -%} partitioned by (\n {%- for partition in partitions -%}\n {{partition.name}}{{', ' if not loop.last}}\n {%- endfor -%}\n ) {%- endif %}\n {% if external.row_format -%} row format {{external.row_format}} {%- endif %}\n {% if external.file_format -%} stored as {{external.file_format}} {%- endif %}\n {% if external.location -%} location '{{external.location}}' {%- endif %}\n {% if external.table_properties -%} tblproperties {{ external.table_properties }} {%- endif -%}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.583384, + "supported_languages": null + }, + "macro.dbt_external_tables.spark__refresh_external_table": { + "name": "spark__refresh_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/spark/refresh_external_table.sql", + "original_file_path": "macros/plugins/spark/refresh_external_table.sql", + "unique_id": "macro.dbt_external_tables.spark__refresh_external_table", + "macro_sql": "{% macro spark__refresh_external_table(source_node) %}\n \n {% set refresh %}\n refresh table {{source(source_node.source_name, source_node.name)}}\n {% endset %}\n \n {% do return([refresh]) %}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, 
"node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.583819, + "supported_languages": null + }, + "macro.dbt_external_tables.spark__get_external_build_plan": { + "name": "spark__get_external_build_plan", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/spark/get_external_build_plan.sql", + "original_file_path": "macros/plugins/spark/get_external_build_plan.sql", + "unique_id": "macro.dbt_external_tables.spark__get_external_build_plan", + "macro_sql": "{% macro spark__get_external_build_plan(source_node) %}\n\n {% set build_plan = [] %}\n\n {% set old_relation = adapter.get_relation(\n database = none,\n schema = source_node.schema,\n identifier = source_node.identifier\n ) %}\n\n {% set create_or_replace = (old_relation is none or var('ext_full_refresh', false)) %}\n\n {% if create_or_replace %}\n {% set build_plan = build_plan + [\n dbt_external_tables.create_external_schema(source_node),\n dbt_external_tables.dropif(source_node), \n dbt_external_tables.create_external_table(source_node)\n ] %}\n {% else %}\n {% set build_plan = build_plan + dbt_external_tables.refresh_external_table(source_node) %}\n {% endif %}\n\n {% set recover_partitions = dbt_external_tables.recover_partitions(source_node) %}\n {% if recover_partitions %}\n {% set build_plan = build_plan + [\n recover_partitions\n ] %}\n {% endif %}\n\n {% do return(build_plan) %}\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_external_tables.create_external_schema", + "macro.dbt_external_tables.dropif", + "macro.dbt_external_tables.create_external_table", + "macro.dbt_external_tables.refresh_external_table", + "macro.dbt_external_tables.recover_partitions" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.585133, + "supported_languages": null + }, + "macro.dbt_external_tables.spark__recover_partitions": { + "name": "spark__recover_partitions", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/spark/helpers/recover_partitions.sql", + "original_file_path": "macros/plugins/spark/helpers/recover_partitions.sql", + "unique_id": "macro.dbt_external_tables.spark__recover_partitions", + "macro_sql": "{% macro spark__recover_partitions(source_node) %}\n {# https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-alter-table.html #}\n\n {%- if source_node.external.partitions and source_node.external.using and source_node.external.using|lower != 'delta' -%}\n {% set ddl %}\n ALTER TABLE {{ source(source_node.source_name, source_node.name) }} RECOVER PARTITIONS\n {% endset %}\n {%- else -%}\n {% set ddl = none %}\n {%- endif -%}\n\n {{return(ddl)}}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.585834, + "supported_languages": null + }, + "macro.dbt_external_tables.recover_partitions": { + "name": "recover_partitions", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/spark/helpers/recover_partitions.sql", + "original_file_path": "macros/plugins/spark/helpers/recover_partitions.sql", + "unique_id": "macro.dbt_external_tables.recover_partitions", + "macro_sql": "{% macro recover_partitions(source_node) %}\n {{ return(adapter.dispatch('recover_partitions', 
'dbt_external_tables')(source_node)) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_external_tables.default__recover_partitions"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.586041, + "supported_languages": null + }, + "macro.dbt_external_tables.default__recover_partitions": { + "name": "default__recover_partitions", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/spark/helpers/recover_partitions.sql", + "original_file_path": "macros/plugins/spark/helpers/recover_partitions.sql", + "unique_id": "macro.dbt_external_tables.default__recover_partitions", + "macro_sql": "{% macro default__recover_partitions(source_node) %}\n /*{# \n We're dispatching this macro so that users can override it if required on other adapters\n but this will work for spark/databricks. \n #}*/\n\n {{ exceptions.raise_not_implemented('recover_partitions macro not implemented for adapter ' + adapter.type()) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5862248, + "supported_languages": null + }, + "macro.dbt_external_tables.spark__dropif": { + "name": "spark__dropif", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/plugins/spark/helpers/dropif.sql", + "original_file_path": "macros/plugins/spark/helpers/dropif.sql", + "unique_id": "macro.dbt_external_tables.spark__dropif", + "macro_sql": "{% macro spark__dropif(node) %}\n \n {% set ddl %}\n drop table if exists {{source(node.source_name, node.name)}}\n {% endset %}\n \n {{return(ddl)}}\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5865338, + "supported_languages": null + }, + "macro.dbt_external_tables.create_external_table": { + "name": "create_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/create_external_table.sql", + "original_file_path": "macros/common/create_external_table.sql", + "unique_id": "macro.dbt_external_tables.create_external_table", + "macro_sql": "{% macro create_external_table(source_node) %}\n {{ adapter.dispatch('create_external_table', 'dbt_external_tables')(source_node) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_external_tables.snowflake__create_external_table"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5867898, + "supported_languages": null + }, + "macro.dbt_external_tables.default__create_external_table": { + "name": "default__create_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/create_external_table.sql", + "original_file_path": "macros/common/create_external_table.sql", + "unique_id": "macro.dbt_external_tables.default__create_external_table", + "macro_sql": "{% macro default__create_external_table(source_node) %}\n {{ exceptions.raise_compiler_error(\"External table creation is not implemented for the default adapter\") }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + 
"patch_path": null, + "arguments": [], + "created_at": 1705588676.586932, + "supported_languages": null + }, + "macro.dbt_external_tables.refresh_external_table": { + "name": "refresh_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/refresh_external_table.sql", + "original_file_path": "macros/common/refresh_external_table.sql", + "unique_id": "macro.dbt_external_tables.refresh_external_table", + "macro_sql": "{% macro refresh_external_table(source_node) %}\n {{ return(adapter.dispatch('refresh_external_table', 'dbt_external_tables')(source_node)) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_external_tables.snowflake__refresh_external_table" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.587191, + "supported_languages": null + }, + "macro.dbt_external_tables.default__refresh_external_table": { + "name": "default__refresh_external_table", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/refresh_external_table.sql", + "original_file_path": "macros/common/refresh_external_table.sql", + "unique_id": "macro.dbt_external_tables.default__refresh_external_table", + "macro_sql": "{% macro default__refresh_external_table(source_node) %}\n {% do return([]) %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5873501, + "supported_languages": null + }, + "macro.dbt_external_tables.create_external_schema": { + "name": "create_external_schema", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/create_external_schema.sql", + "original_file_path": "macros/common/create_external_schema.sql", + "unique_id": "macro.dbt_external_tables.create_external_schema", + "macro_sql": "{%- macro create_external_schema(source_node) -%}\n {{ adapter.dispatch('create_external_schema', 'dbt_external_tables')(source_node) }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": [ + "macro.dbt_external_tables.snowflake__create_external_schema" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.587765, + "supported_languages": null + }, + "macro.dbt_external_tables.default__create_external_schema": { + "name": "default__create_external_schema", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/create_external_schema.sql", + "original_file_path": "macros/common/create_external_schema.sql", + "unique_id": "macro.dbt_external_tables.default__create_external_schema", + "macro_sql": "{%- macro default__create_external_schema(source_node) -%}\n {%- set fqn -%}\n {%- if source_node.database -%}\n {{ source_node.database }}.{{ source_node.schema }}\n {%- else -%}\n {{ source_node.schema }}\n {%- endif -%}\n {%- endset -%}\n\n {%- set ddl -%}\n create schema if not exists {{ fqn }}\n {%- endset -%}\n\n {{ return(ddl) }}\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.588129, + "supported_languages": null + }, + "macro.dbt_external_tables.get_external_build_plan": { + "name": "get_external_build_plan", 
+ "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/get_external_build_plan.sql", + "original_file_path": "macros/common/get_external_build_plan.sql", + "unique_id": "macro.dbt_external_tables.get_external_build_plan", + "macro_sql": "{% macro get_external_build_plan(source_node) %}\n {{ return(adapter.dispatch('get_external_build_plan', 'dbt_external_tables')(source_node)) }}\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_external_tables.snowflake__get_external_build_plan" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.588413, + "supported_languages": null + }, + "macro.dbt_external_tables.default__get_external_build_plan": { + "name": "default__get_external_build_plan", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/get_external_build_plan.sql", + "original_file_path": "macros/common/get_external_build_plan.sql", + "unique_id": "macro.dbt_external_tables.default__get_external_build_plan", + "macro_sql": "{% macro default__get_external_build_plan(source_node) %}\n {{ exceptions.raise_compiler_error(\"Staging external sources is not implemented for the default adapter\") }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.588554, + "supported_languages": null + }, + "macro.dbt_external_tables.stage_external_sources": { + "name": "stage_external_sources", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/stage_external_sources.sql", + "original_file_path": "macros/common/stage_external_sources.sql", + "unique_id": "macro.dbt_external_tables.stage_external_sources", + "macro_sql": "{% macro stage_external_sources(select=none) %}\n\n {% set sources_to_stage = [] %}\n \n {% set source_nodes = graph.sources.values() if graph.sources else [] %}\n \n {% for node in source_nodes %}\n {% if node.external %}\n \n {% if select %}\n \n {% for src in select.split(' ') %}\n \n {% if '.' in src %}\n {% set src_s = src.split('.') %}\n {% if src_s[0] == node.source_name and src_s[1] == node.name %}\n {% do sources_to_stage.append(node) %}\n {% endif %}\n {% else %}\n {% if src == node.source_name %}\n {% do sources_to_stage.append(node) %}\n {% endif %}\n {% endif %}\n \n {% endfor %}\n \n {% else %}\n \n {% do sources_to_stage.append(node) %}\n \n {% endif %}\n {% endif %}\n \n {% endfor %}\n \n {% if sources_to_stage|length == 0 %}\n {% do log('No external sources selected', info = true) %}\n {% endif %}\n \n {% for node in sources_to_stage %}\n\n {% set loop_label = loop.index ~ ' of ' ~ loop.length %}\n\n {% do log(loop_label ~ ' START external source ' ~ node.schema ~ '.' ~ node.identifier, info = true) -%}\n \n {% set run_queue = dbt_external_tables.get_external_build_plan(node) %}\n \n {% do log(loop_label ~ ' SKIP', info = true) if run_queue == [] %}\n {% set width = flags.PRINTER_WIDTH %}\n \n {% for q in run_queue %}\n \n {% set q_msg = q|replace('\\n','')|replace('begin;','')|trim %}\n {% set q_log = q_msg[:width] ~ '... 
' if q_msg|length > width else q_msg %}\n \n {% do log(loop_label ~ ' (' ~ loop.index ~ ') ' ~ q_log, info = true) %}\n {% set exit_txn = dbt_external_tables.exit_transaction() %}\n \n {% call statement('runner', fetch_result = True, auto_begin = False) %}\n {{ exit_txn }} {{ q }}\n {% endcall %}\n \n {% set runner = load_result('runner') %}\n {% set log_msg = runner['response'] if 'response' in runner.keys() else runner['status'] %}\n {% do log(loop_label ~ ' (' ~ loop.index ~ ') ' ~ log_msg, info = true) %}\n \n {% endfor %}\n \n {% endfor %}\n \n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_external_tables.get_external_build_plan", + "macro.dbt_external_tables.exit_transaction", + "macro.dbt.statement" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.59484, + "supported_languages": null + }, + "macro.dbt_external_tables.dropif": { + "name": "dropif", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/helpers/dropif.sql", + "original_file_path": "macros/common/helpers/dropif.sql", + "unique_id": "macro.dbt_external_tables.dropif", + "macro_sql": "{% macro dropif(node) %}\n {{ adapter.dispatch('dropif', 'dbt_external_tables')(node) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_external_tables.default__dropif"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5951629, + "supported_languages": null + }, + "macro.dbt_external_tables.default__dropif": { + "name": "default__dropif", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/helpers/dropif.sql", + "original_file_path": "macros/common/helpers/dropif.sql", + "unique_id": "macro.dbt_external_tables.default__dropif", + "macro_sql": "{% macro default__dropif() %}\n {{ exceptions.raise_compiler_error(\n \"Dropping external tables is not implemented for the default adapter\"\n ) }}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.595314, + "supported_languages": null + }, + "macro.dbt_external_tables.exit_transaction": { + "name": "exit_transaction", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/helpers/transaction.sql", + "original_file_path": "macros/common/helpers/transaction.sql", + "unique_id": "macro.dbt_external_tables.exit_transaction", + "macro_sql": "{% macro exit_transaction() %}\n {{ return(adapter.dispatch('exit_transaction', 'dbt_external_tables')()) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_external_tables.default__exit_transaction"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5955782, + "supported_languages": null + }, + "macro.dbt_external_tables.default__exit_transaction": { + "name": "default__exit_transaction", + "resource_type": "macro", + "package_name": "dbt_external_tables", + "path": "macros/common/helpers/transaction.sql", + "original_file_path": "macros/common/helpers/transaction.sql", + "unique_id": "macro.dbt_external_tables.default__exit_transaction", + "macro_sql": "{% macro default__exit_transaction() %}\n {{ return('') }}\n{% endmacro %}", + "depends_on": 
{ "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5957172, + "supported_languages": null + }, + "macro.dbt_expectations.type_timestamp": { + "name": "type_timestamp", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/utils/datatypes.sql", + "original_file_path": "macros/utils/datatypes.sql", + "unique_id": "macro.dbt_expectations.type_timestamp", + "macro_sql": "\n{%- macro type_timestamp() -%}\n {{ return(adapter.dispatch('type_timestamp', 'dbt_expectations')()) }}\n{%- endmacro -%}\n\n", + "depends_on": { + "macros": ["macro.dbt_expectations.snowflake__type_timestamp"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.596141, + "supported_languages": null + }, + "macro.dbt_expectations.default__type_timestamp": { + "name": "default__type_timestamp", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/utils/datatypes.sql", + "original_file_path": "macros/utils/datatypes.sql", + "unique_id": "macro.dbt_expectations.default__type_timestamp", + "macro_sql": "{% macro default__type_timestamp() -%}\n timestamp\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.596225, + "supported_languages": null + }, + "macro.dbt_expectations.snowflake__type_timestamp": { + "name": "snowflake__type_timestamp", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/utils/datatypes.sql", + "original_file_path": "macros/utils/datatypes.sql", + "unique_id": "macro.dbt_expectations.snowflake__type_timestamp", + "macro_sql": "{% macro snowflake__type_timestamp() -%}\n timestamp_ntz\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.596307, + "supported_languages": null + }, + "macro.dbt_expectations.postgres__type_timestamp": { + "name": "postgres__type_timestamp", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/utils/datatypes.sql", + "original_file_path": "macros/utils/datatypes.sql", + "unique_id": "macro.dbt_expectations.postgres__type_timestamp", + "macro_sql": "{% macro postgres__type_timestamp() -%}\n timestamp without time zone\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.596388, + "supported_languages": null + }, + "macro.dbt_expectations.type_datetime": { + "name": "type_datetime", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/utils/datatypes.sql", + "original_file_path": "macros/utils/datatypes.sql", + "unique_id": "macro.dbt_expectations.type_datetime", + "macro_sql": "{% macro type_datetime() -%}\n {{ return(adapter.dispatch('type_datetime', 'dbt_expectations')()) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_expectations.snowflake__type_datetime"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5965598, + "supported_languages": null + 
}, + "macro.dbt_expectations.default__type_datetime": { + "name": "default__type_datetime", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/utils/datatypes.sql", + "original_file_path": "macros/utils/datatypes.sql", + "unique_id": "macro.dbt_expectations.default__type_datetime", + "macro_sql": "{% macro default__type_datetime() -%}\n datetime\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5966442, + "supported_languages": null + }, + "macro.dbt_expectations.snowflake__type_datetime": { + "name": "snowflake__type_datetime", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/utils/datatypes.sql", + "original_file_path": "macros/utils/datatypes.sql", + "unique_id": "macro.dbt_expectations.snowflake__type_datetime", + "macro_sql": "{% macro snowflake__type_datetime() -%}\n timestamp_ntz\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.596722, + "supported_languages": null + }, + "macro.dbt_expectations.postgres__type_datetime": { + "name": "postgres__type_datetime", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/utils/datatypes.sql", + "original_file_path": "macros/utils/datatypes.sql", + "unique_id": "macro.dbt_expectations.postgres__type_datetime", + "macro_sql": "{% macro postgres__type_datetime() -%}\n timestamp without time zone\n{%- endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.596843, + "supported_languages": null + }, + "macro.dbt_expectations.group_by": { + "name": "group_by", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/utils/groupby.sql", + "original_file_path": "macros/utils/groupby.sql", + "unique_id": "macro.dbt_expectations.group_by", + "macro_sql": "{%- macro group_by(n) -%}\n {{ return(adapter.dispatch('group_by', 'dbt_expectations')(n)) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_expectations.default__group_by"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5972412, + "supported_languages": null + }, + "macro.dbt_expectations.default__group_by": { + "name": "default__group_by", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/utils/groupby.sql", + "original_file_path": "macros/utils/groupby.sql", + "unique_id": "macro.dbt_expectations.default__group_by", + "macro_sql": "\n\n{%- macro default__group_by(n) -%}\n\n group by {% for i in range(1, n + 1) -%}\n {{ i }}{{ ',' if not loop.last }}\n {%- endfor -%}\n\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.597513, + "supported_languages": null + }, + "macro.dbt_expectations.regexp_instr": { + "name": "regexp_instr", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/regex/regexp_instr.sql", + "original_file_path": "macros/regex/regexp_instr.sql", + "unique_id": 
"macro.dbt_expectations.regexp_instr", + "macro_sql": "{% macro regexp_instr(source_value, regexp, position=1, occurrence=1, is_raw=False, flags=\"\") %}\n\n {{ adapter.dispatch('regexp_instr', 'dbt_expectations')(\n source_value, regexp, position, occurrence, is_raw, flags\n ) }}\n\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_expectations.snowflake__regexp_instr"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.5987341, + "supported_languages": null + }, + "macro.dbt_expectations.default__regexp_instr": { + "name": "default__regexp_instr", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/regex/regexp_instr.sql", + "original_file_path": "macros/regex/regexp_instr.sql", + "unique_id": "macro.dbt_expectations.default__regexp_instr", + "macro_sql": "{% macro default__regexp_instr(source_value, regexp, position, occurrence, is_raw, flags) %}\n{# unclear if other databases support raw strings or flags #}\n{% if is_raw or flags %}\n {{ exceptions.warn(\n \"is_raw and flags options are not supported for this adapter \"\n ~ \"and are being ignored.\"\n ) }}\n{% endif %}\nregexp_instr({{ source_value }}, '{{ regexp }}', {{ position }}, {{ occurrence }})\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.599221, + "supported_languages": null + }, + "macro.dbt_expectations.snowflake__regexp_instr": { + "name": "snowflake__regexp_instr", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/regex/regexp_instr.sql", + "original_file_path": "macros/regex/regexp_instr.sql", + "unique_id": "macro.dbt_expectations.snowflake__regexp_instr", + "macro_sql": "{% macro snowflake__regexp_instr(source_value, regexp, position, occurrence, is_raw, flags) %}\n{%- set regexp = \"$$\" ~ regexp ~ \"$$\" if is_raw else \"'\" ~ regexp ~ \"'\" -%}\n{% if flags %}{{ dbt_expectations._validate_flags(flags, 'cimes') }}{% endif %}\nregexp_instr({{ source_value }}, {{ regexp }}, {{ position }}, {{ occurrence }}, 0, '{{ flags }}')\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_expectations._validate_flags"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.599715, + "supported_languages": null + }, + "macro.dbt_expectations.bigquery__regexp_instr": { + "name": "bigquery__regexp_instr", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/regex/regexp_instr.sql", + "original_file_path": "macros/regex/regexp_instr.sql", + "unique_id": "macro.dbt_expectations.bigquery__regexp_instr", + "macro_sql": "{% macro bigquery__regexp_instr(source_value, regexp, position, occurrence, is_raw, flags) %}\n{% if flags %}\n {{ dbt_expectations._validate_re2_flags(flags) }}\n {# BigQuery prepends \"(?flags)\" to set flags for current group #}\n {%- set regexp = \"(?\" ~ flags ~ \")\" ~ regexp -%}\n{% endif %}\n{%- set regexp = \"r'\" ~ regexp ~ \"'\" if is_raw else \"'\" ~ regexp ~ \"'\" -%}\nregexp_instr({{ source_value }}, {{ regexp }}, {{ position }}, {{ occurrence }})\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_expectations._validate_re2_flags"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": 
null, + "arguments": [], + "created_at": 1705588676.6003609, + "supported_languages": null + }, + "macro.dbt_expectations.postgres__regexp_instr": { + "name": "postgres__regexp_instr", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/regex/regexp_instr.sql", + "original_file_path": "macros/regex/regexp_instr.sql", + "unique_id": "macro.dbt_expectations.postgres__regexp_instr", + "macro_sql": "{% macro postgres__regexp_instr(source_value, regexp, position, occurrence, is_raw, flags) %}\n{% if flags %}{{ dbt_expectations._validate_flags(flags, 'bcegimnpqstwx') }}{% endif %}\ncoalesce(array_length((select regexp_matches({{ source_value }}, '{{ regexp }}', '{{ flags }}')), 1), 0)\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_expectations._validate_flags"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.60069, + "supported_languages": null + }, + "macro.dbt_expectations.redshift__regexp_instr": { + "name": "redshift__regexp_instr", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/regex/regexp_instr.sql", + "original_file_path": "macros/regex/regexp_instr.sql", + "unique_id": "macro.dbt_expectations.redshift__regexp_instr", + "macro_sql": "{% macro redshift__regexp_instr(source_value, regexp, position, occurrence, is_raw, flags) %}\n{% if flags %}{{ dbt_expectations._validate_flags(flags, 'ciep') }}{% endif %}\nregexp_instr({{ source_value }}, '{{ regexp }}', {{ position }}, {{ occurrence }}, 0, '{{ flags }}')\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_expectations._validate_flags"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.601045, + "supported_languages": null + }, + "macro.dbt_expectations._validate_flags": { + "name": "_validate_flags", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/regex/regexp_instr.sql", + "original_file_path": "macros/regex/regexp_instr.sql", + "unique_id": "macro.dbt_expectations._validate_flags", + "macro_sql": "{% macro _validate_flags(flags, alphabet) %}\n{% for flag in flags %}\n {% if flag not in alphabet %}\n {# Using raise_compiler_error causes disabled tests with invalid flags to fail compilation #}\n {{ exceptions.warn(\n \"flag \" ~ flag ~ \" not in list of allowed flags for this adapter: \" ~ alphabet | join(\", \")\n ) }}\n {% endif %}\n{% endfor %}\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.601417, + "supported_languages": null + }, + "macro.dbt_expectations._validate_re2_flags": { + "name": "_validate_re2_flags", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/regex/regexp_instr.sql", + "original_file_path": "macros/regex/regexp_instr.sql", + "unique_id": "macro.dbt_expectations._validate_re2_flags", + "macro_sql": "{% macro _validate_re2_flags(flags) %}\n{# Re2 supports following flags: #}\n{# i : case-insensitive (default fault) #}\n{# m : multi-line mode: ^ and $ match begin/end line in addition to begin/end text (default false) #}\n{# s : let . 
match \\n (default false) #}\n{# U : ungreedy: swap meaning of x* and x*?, x+ and x+?, etc (default false) #}\n{# Flag syntax is xyz (set) or -xyz (clear) or xy-z (set xy, clear z). #}\n\n{# Regex explanation: do not allow consecutive dashes, accept all re2 flags and clear operator, do not end with a dash #}\n{% set re2_flags_pattern = '^(?!.*--)[-imsU]*(?{{ strict_operator }} {{ min_value }}{% endif %}\n{%- if max_value is not none %} and {{ expression | trim }} <{{ strict_operator }} {{ max_value }}{% endif %}\n)\n{% endset %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression_min_max,\n group_by_columns=group_by_columns,\n row_condition=row_condition)\n }}\n\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.60816, + "supported_languages": null + }, + "macro.dbt_expectations.test_expression_is_true": { + "name": "test_expression_is_true", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/_generalized/expression_is_true.sql", + "original_file_path": "macros/schema_tests/_generalized/expression_is_true.sql", + "unique_id": "macro.dbt_expectations.test_expression_is_true", + "macro_sql": "{% test expression_is_true(model,\n expression,\n test_condition=\"= true\",\n group_by_columns=None,\n row_condition=None\n ) %}\n\n {{ dbt_expectations.expression_is_true(model, expression, test_condition, group_by_columns, row_condition) }}\n\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.609205, + "supported_languages": null + }, + "macro.dbt_expectations.expression_is_true": { + "name": "expression_is_true", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/_generalized/expression_is_true.sql", + "original_file_path": "macros/schema_tests/_generalized/expression_is_true.sql", + "unique_id": "macro.dbt_expectations.expression_is_true", + "macro_sql": "{% macro expression_is_true(model,\n expression,\n test_condition=\"= true\",\n group_by_columns=None,\n row_condition=None\n ) %}\n {{ adapter.dispatch('expression_is_true', 'dbt_expectations') (model, expression, test_condition, group_by_columns, row_condition) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_expectations.default__expression_is_true"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.609528, + "supported_languages": null + }, + "macro.dbt_expectations.default__expression_is_true": { + "name": "default__expression_is_true", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/_generalized/expression_is_true.sql", + "original_file_path": "macros/schema_tests/_generalized/expression_is_true.sql", + "unique_id": "macro.dbt_expectations.default__expression_is_true", + "macro_sql": "{% macro default__expression_is_true(model, expression, test_condition, group_by_columns, row_condition) -%}\nwith grouped_expression as (\n select\n {% if group_by_columns %}\n {% for group_by_column in group_by_columns -%}\n {{ group_by_column }} as col_{{ loop.index }},\n {% endfor -%}\n {% endif 
%}\n {{ dbt_expectations.truth_expression(expression) }}\n from {{ model }}\n {%- if row_condition %}\n where\n {{ row_condition }}\n {% endif %}\n {% if group_by_columns %}\n group by\n {% for group_by_column in group_by_columns -%}\n {{ group_by_column }}{% if not loop.last %},{% endif %}\n {% endfor %}\n {% endif %}\n\n),\nvalidation_errors as (\n\n select\n *\n from\n grouped_expression\n where\n not(expression {{ test_condition }})\n\n)\n\nselect *\nfrom validation_errors\n\n\n{% endmacro -%}", + "depends_on": { "macros": ["macro.dbt_expectations.truth_expression"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.610183, + "supported_languages": null + }, + "macro.dbt_expectations.get_select": { + "name": "get_select", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/_generalized/equal_expression.sql", + "original_file_path": "macros/schema_tests/_generalized/equal_expression.sql", + "unique_id": "macro.dbt_expectations.get_select", + "macro_sql": "{% macro get_select(model, expression, row_condition, group_by) -%}\n {{ adapter.dispatch('get_select', 'dbt_expectations') (model, expression, row_condition, group_by) }}\n{%- endmacro %}", + "depends_on": { + "macros": ["macro.dbt_expectations.default__get_select"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.616162, + "supported_languages": null + }, + "macro.dbt_expectations.default__get_select": { + "name": "default__get_select", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/_generalized/equal_expression.sql", + "original_file_path": "macros/schema_tests/_generalized/equal_expression.sql", + "unique_id": "macro.dbt_expectations.default__get_select", + "macro_sql": "\n\n{%- macro default__get_select(model, expression, row_condition, group_by) %}\n select\n {% if group_by %}\n {% for g in group_by -%}\n {{ g }} as col_{{ loop.index }},\n {% endfor -%}\n {% endif %}\n {{ expression }} as expression\n from\n {{ model }}\n {%- if row_condition %}\n where\n {{ row_condition }}\n {% endif %}\n {% if group_by %}\n group by\n {% for g in group_by -%}\n {{ loop.index }}{% if not loop.last %},{% endif %}\n {% endfor %}\n {% endif %}\n{% endmacro -%}\n\n\n", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6167412, + "supported_languages": null + }, + "macro.dbt_expectations.test_equal_expression": { + "name": "test_equal_expression", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/_generalized/equal_expression.sql", + "original_file_path": "macros/schema_tests/_generalized/equal_expression.sql", + "unique_id": "macro.dbt_expectations.test_equal_expression", + "macro_sql": "{% test equal_expression(model, expression,\n compare_model=None,\n compare_expression=None,\n group_by=None,\n compare_group_by=None,\n row_condition=None,\n compare_row_condition=None,\n tolerance=0.0,\n tolerance_percent=None\n ) -%}\n\n {{ adapter.dispatch('test_equal_expression', 'dbt_expectations') (\n model,\n expression,\n compare_model,\n compare_expression,\n group_by,\n compare_group_by,\n row_condition,\n compare_row_condition,\n tolerance,\n tolerance_percent) }}\n{%- 
endtest %}", + "depends_on": { + "macros": ["macro.dbt_expectations.default__test_equal_expression"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.61725, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_equal_expression": { + "name": "default__test_equal_expression", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/_generalized/equal_expression.sql", + "original_file_path": "macros/schema_tests/_generalized/equal_expression.sql", + "unique_id": "macro.dbt_expectations.default__test_equal_expression", + "macro_sql": "\n\n{%- macro default__test_equal_expression(\n model,\n expression,\n compare_model,\n compare_expression,\n group_by,\n compare_group_by,\n row_condition,\n compare_row_condition,\n tolerance,\n tolerance_percent) -%}\n\n {%- set compare_model = model if not compare_model else compare_model -%}\n {%- set compare_expression = expression if not compare_expression else compare_expression -%}\n {%- set compare_row_condition = row_condition if not compare_row_condition else compare_row_condition -%}\n {%- set compare_group_by = group_by if not compare_group_by else compare_group_by -%}\n\n {%- set n_cols = (group_by|length) if group_by else 0 %}\n with a as (\n {{ dbt_expectations.get_select(model, expression, row_condition, group_by) }}\n ),\n b as (\n {{ dbt_expectations.get_select(compare_model, compare_expression, compare_row_condition, compare_group_by) }}\n ),\n final as (\n\n select\n {% for i in range(1, n_cols + 1) -%}\n coalesce(a.col_{{ i }}, b.col_{{ i }}) as col_{{ i }},\n {% endfor %}\n a.expression,\n b.expression as compare_expression,\n abs(coalesce(a.expression, 0) - coalesce(b.expression, 0)) as expression_difference,\n abs(coalesce(a.expression, 0) - coalesce(b.expression, 0))/\n nullif(a.expression * 1.0, 0) as expression_difference_percent\n from\n {% if n_cols > 0 %}\n a\n full outer join\n b on\n {% for i in range(1, n_cols + 1) -%}\n a.col_{{ i }} = b.col_{{ i }} {% if not loop.last %}and{% endif %}\n {% endfor -%}\n {% else %}\n a cross join b\n {% endif %}\n )\n -- DEBUG:\n -- select * from final\n select\n *\n from final\n where\n {% if tolerance_percent %}\n expression_difference_percent > {{ tolerance_percent }}\n {% else %}\n expression_difference > {{ tolerance }}\n {% endif %}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_expectations.get_select"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.618588, + "supported_languages": null + }, + "macro.dbt_expectations.truth_expression": { + "name": "truth_expression", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/_generalized/_truth_expression.sql", + "original_file_path": "macros/schema_tests/_generalized/_truth_expression.sql", + "unique_id": "macro.dbt_expectations.truth_expression", + "macro_sql": "{% macro truth_expression(expression) %}\n {{ adapter.dispatch('truth_expression', 'dbt_expectations') (expression) }}\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_expectations.default__truth_expression"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6188538, + "supported_languages": null + }, + 
"macro.dbt_expectations.default__truth_expression": { + "name": "default__truth_expression", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/_generalized/_truth_expression.sql", + "original_file_path": "macros/schema_tests/_generalized/_truth_expression.sql", + "unique_id": "macro.dbt_expectations.default__truth_expression", + "macro_sql": "{% macro default__truth_expression(expression) %}\n {{ expression }} as expression\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.618965, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_match_like_pattern": { + "name": "test_expect_column_values_to_match_like_pattern", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/expect_column_values_to_match_like_pattern.sql", + "original_file_path": "macros/schema_tests/string_matching/expect_column_values_to_match_like_pattern.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_match_like_pattern", + "macro_sql": "{% test expect_column_values_to_match_like_pattern(model, column_name,\n like_pattern,\n row_condition=None\n ) %}\n\n{% set expression = dbt_expectations._get_like_pattern_expression(column_name, like_pattern, positive=True) %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations._get_like_pattern_expression", + "macro.dbt_expectations.expression_is_true" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6195462, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_match_like_pattern_list": { + "name": "test_expect_column_values_to_match_like_pattern_list", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/expect_column_values_to_match_like_pattern_list.sql", + "original_file_path": "macros/schema_tests/string_matching/expect_column_values_to_match_like_pattern_list.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_match_like_pattern_list", + "macro_sql": "{% test expect_column_values_to_match_like_pattern_list(model, column_name,\n like_pattern_list,\n match_on=\"any\",\n row_condition=None\n ) %}\n\n{% set expression %}\n {% for like_pattern in like_pattern_list %}\n {{ dbt_expectations._get_like_pattern_expression(column_name, like_pattern, positive=True) }}\n {%- if not loop.last %}\n {{ \" and \" if match_on == \"all\" else \" or \"}}\n {% endif -%}\n {% endfor %}\n{% endset %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations._get_like_pattern_expression", + "macro.dbt_expectations.expression_is_true" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6206641, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_match_regex": { + "name": 
"test_expect_column_values_to_match_regex", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/expect_column_values_to_match_regex.sql", + "original_file_path": "macros/schema_tests/string_matching/expect_column_values_to_match_regex.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_match_regex", + "macro_sql": "{% test expect_column_values_to_match_regex(model, column_name,\n regex,\n row_condition=None,\n is_raw=False,\n flags=\"\"\n ) %}\n\n{% set expression %}\n{{ dbt_expectations.regexp_instr(column_name, regex, is_raw=is_raw, flags=flags) }} > 0\n{% endset %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.regexp_instr", + "macro.dbt_expectations.expression_is_true" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6214368, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_value_lengths_to_equal": { + "name": "test_expect_column_value_lengths_to_equal", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/expect_column_value_lengths_to_equal.sql", + "original_file_path": "macros/schema_tests/string_matching/expect_column_value_lengths_to_equal.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_value_lengths_to_equal", + "macro_sql": "{% test expect_column_value_lengths_to_equal(model, column_name,\n value,\n row_condition=None\n ) %}\n\n{% set expression = dbt.length(column_name) ~ \" = \" ~ value %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt.length", + "macro.dbt_expectations.expression_is_true" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.62207, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_value_lengths_to_be_between": { + "name": "test_expect_column_value_lengths_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/expect_column_value_lengths_to_be_between.sql", + "original_file_path": "macros/schema_tests/string_matching/expect_column_value_lengths_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_value_lengths_to_be_between", + "macro_sql": "{% test expect_column_value_lengths_to_be_between(model, column_name,\n min_value=None,\n max_value=None,\n row_condition=None,\n strictly=False\n ) %}\n{% set expression %}\n{{ dbt.length(column_name) }}\n{% endset %}\n\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=None,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt.length", + "macro.dbt_expectations.expression_between" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.623044, + "supported_languages": null + }, + 
"macro.dbt_expectations.test_expect_column_values_to_not_match_regex": { + "name": "test_expect_column_values_to_not_match_regex", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/expect_column_values_to_not_match_regex.sql", + "original_file_path": "macros/schema_tests/string_matching/expect_column_values_to_not_match_regex.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_not_match_regex", + "macro_sql": "{% test expect_column_values_to_not_match_regex(model, column_name,\n regex,\n row_condition=None,\n is_raw=False,\n flags=\"\"\n ) %}\n\n{% set expression %}\n{{ dbt_expectations.regexp_instr(column_name, regex, is_raw=is_raw, flags=flags) }} = 0\n{% endset %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.regexp_instr", + "macro.dbt_expectations.expression_is_true" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.623848, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_not_match_regex_list": { + "name": "test_expect_column_values_to_not_match_regex_list", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/expect_column_values_to_not_match_regex_list.sql", + "original_file_path": "macros/schema_tests/string_matching/expect_column_values_to_not_match_regex_list.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_not_match_regex_list", + "macro_sql": "{% test expect_column_values_to_not_match_regex_list(model, column_name,\n regex_list,\n match_on=\"any\",\n row_condition=None,\n is_raw=False,\n flags=\"\"\n ) %}\n\n{% set expression %}\n{% for regex in regex_list %}\n{{ dbt_expectations.regexp_instr(column_name, regex, is_raw=is_raw, flags=flags) }} = 0\n{%- if not loop.last %}\n{{ \" and \" if match_on == \"all\" else \" or \"}}\n{% endif -%}\n{% endfor %}\n{% endset %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.regexp_instr", + "macro.dbt_expectations.expression_is_true" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6251369, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_match_regex_list": { + "name": "test_expect_column_values_to_match_regex_list", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/expect_column_values_to_match_regex_list.sql", + "original_file_path": "macros/schema_tests/string_matching/expect_column_values_to_match_regex_list.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_match_regex_list", + "macro_sql": "{% test expect_column_values_to_match_regex_list(model, column_name,\n regex_list,\n match_on=\"any\",\n row_condition=None,\n is_raw=False,\n flags=\"\"\n ) %}\n\n{% set expression %}\n {% for regex in regex_list %}\n {{ dbt_expectations.regexp_instr(column_name, regex, is_raw=is_raw, flags=flags) }} > 0\n {%- if not loop.last %}\n {{ \" and \" if match_on == \"all\" 
else \" or \"}}\n {% endif -%}\n {% endfor %}\n{% endset %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.regexp_instr", + "macro.dbt_expectations.expression_is_true" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.626312, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_not_match_like_pattern_list": { + "name": "test_expect_column_values_to_not_match_like_pattern_list", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/expect_column_values_to_not_match_like_pattern_list.sql", + "original_file_path": "macros/schema_tests/string_matching/expect_column_values_to_not_match_like_pattern_list.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_not_match_like_pattern_list", + "macro_sql": "{% test expect_column_values_to_not_match_like_pattern_list(model, column_name,\n like_pattern_list,\n match_on=\"any\",\n row_condition=None\n ) %}\n\n{% set expression %}\n {% for like_pattern in like_pattern_list %}\n {{ dbt_expectations._get_like_pattern_expression(column_name, like_pattern, positive=False) }}\n {%- if not loop.last %}\n {{ \" and \" if match_on == \"all\" else \" or \"}}\n {% endif -%}\n {% endfor %}\n{% endset %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations._get_like_pattern_expression", + "macro.dbt_expectations.expression_is_true" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.627383, + "supported_languages": null + }, + "macro.dbt_expectations._get_like_pattern_expression": { + "name": "_get_like_pattern_expression", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/_get_like_pattern_expression.sql", + "original_file_path": "macros/schema_tests/string_matching/_get_like_pattern_expression.sql", + "unique_id": "macro.dbt_expectations._get_like_pattern_expression", + "macro_sql": "{% macro _get_like_pattern_expression(column_name, like_pattern, positive) %}\n{{ column_name }} {{ \"not\" if not positive else \"\" }} like '{{ like_pattern }}'\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.627649, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_not_match_like_pattern": { + "name": "test_expect_column_values_to_not_match_like_pattern", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/string_matching/expect_column_values_to_not_match_like_pattern.sql", + "original_file_path": "macros/schema_tests/string_matching/expect_column_values_to_not_match_like_pattern.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_not_match_like_pattern", + "macro_sql": "{% test expect_column_values_to_not_match_like_pattern(model, column_name,\n like_pattern,\n row_condition=None\n ) %}\n\n{% set expression = 
dbt_expectations._get_like_pattern_expression(column_name, like_pattern, positive=False) %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations._get_like_pattern_expression", + "macro.dbt_expectations.expression_is_true" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6282098, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_row_values_to_have_recent_data": { + "name": "test_expect_row_values_to_have_recent_data", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_row_values_to_have_recent_data.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_row_values_to_have_recent_data.sql", + "unique_id": "macro.dbt_expectations.test_expect_row_values_to_have_recent_data", + "macro_sql": "{% test expect_row_values_to_have_recent_data(model,\n column_name,\n datepart,\n interval,\n row_condition=None) %}\n\n {{ adapter.dispatch('test_expect_row_values_to_have_recent_data', 'dbt_expectations') (model,\n column_name,\n datepart,\n interval,\n row_condition) }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.default__test_expect_row_values_to_have_recent_data" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.629478, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_row_values_to_have_recent_data": { + "name": "default__test_expect_row_values_to_have_recent_data", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_row_values_to_have_recent_data.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_row_values_to_have_recent_data.sql", + "unique_id": "macro.dbt_expectations.default__test_expect_row_values_to_have_recent_data", + "macro_sql": "{% macro default__test_expect_row_values_to_have_recent_data(model, column_name, datepart, interval, row_condition) %}\n{%- set default_start_date = '1970-01-01' -%}\nwith max_recency as (\n\n select max(cast({{ column_name }} as {{ dbt_expectations.type_timestamp() }})) as max_timestamp\n from\n {{ model }}\n where\n -- to exclude erroneous future dates\n cast({{ column_name }} as {{ dbt_expectations.type_timestamp() }}) <= {{ dbt_date.now() }}\n {% if row_condition %}\n and {{ row_condition }}\n {% endif %}\n)\nselect\n *\nfrom\n max_recency\nwhere\n -- if the row_condition excludes all rows, we need to compare against a default date\n -- to avoid false negatives\n coalesce(max_timestamp, cast('{{ default_start_date }}' as {{ dbt_expectations.type_timestamp() }}))\n <\n cast({{ dbt.dateadd(datepart, interval * -1, dbt_date.now()) }} as {{ dbt_expectations.type_timestamp() }})\n\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.type_timestamp", + "macro.dbt_date.now", + "macro.dbt.dateadd" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.630249, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_columns_to_contain_set": { + "name": "test_expect_table_columns_to_contain_set", + 
"resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_columns_to_contain_set.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_columns_to_contain_set.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_columns_to_contain_set", + "macro_sql": "{%- test expect_table_columns_to_contain_set(model, column_list, transform=\"upper\") -%}\n{%- if execute -%}\n {%- set column_list = column_list | map(transform) | list -%}\n {%- set relation_column_names = dbt_expectations._get_column_list(model, transform) -%}\n {%- set matching_columns = dbt_expectations._list_intersect(column_list, relation_column_names) -%}\n with relation_columns as (\n\n {% for col_name in relation_column_names %}\n select cast('{{ col_name }}' as {{ dbt.type_string() }}) as relation_column\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n ),\n input_columns as (\n\n {% for col_name in column_list %}\n select cast('{{ col_name }}' as {{ dbt.type_string() }}) as input_column\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n )\n select *\n from\n input_columns i\n left join\n relation_columns r on r.relation_column = i.input_column\n where\n -- catch any column in input list that is not in the list of table columns\n r.relation_column is null\n{%- endif -%}\n{%- endtest -%}", + "depends_on": { + "macros": [ + "macro.dbt_expectations._get_column_list", + "macro.dbt_expectations._list_intersect", + "macro.dbt.type_string" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.631467, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_row_count_to_equal_other_table": { + "name": "test_expect_table_row_count_to_equal_other_table", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal_other_table.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal_other_table.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_row_count_to_equal_other_table", + "macro_sql": "{%- test expect_table_row_count_to_equal_other_table(model,\n compare_model,\n group_by=None,\n compare_group_by=None,\n factor=1,\n row_condition=None,\n compare_row_condition=None\n ) -%}\n\n {{ adapter.dispatch('test_expect_table_row_count_to_equal_other_table',\n 'dbt_expectations') (model,\n compare_model,\n group_by,\n compare_group_by,\n factor,\n row_condition,\n compare_row_condition\n ) }}\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.default__test_expect_table_row_count_to_equal_other_table" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.63278, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_table_row_count_to_equal_other_table": { + "name": "default__test_expect_table_row_count_to_equal_other_table", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal_other_table.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal_other_table.sql", + "unique_id": "macro.dbt_expectations.default__test_expect_table_row_count_to_equal_other_table", + "macro_sql": "\n\n{%- macro 
default__test_expect_table_row_count_to_equal_other_table(model,\n compare_model,\n group_by,\n compare_group_by,\n factor,\n row_condition,\n compare_row_condition\n ) -%}\n{{ dbt_expectations.test_equal_expression(model, \"count(*)\",\n compare_model=compare_model,\n compare_expression=\"count(*) * \" + factor|string,\n group_by=group_by,\n compare_group_by=compare_group_by,\n row_condition=row_condition,\n compare_row_condition=compare_row_condition\n) }}\n{%- endmacro -%}", + "depends_on": { + "macros": ["macro.dbt_expectations.test_equal_expression"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6331992, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_columns_to_not_contain_set": { + "name": "test_expect_table_columns_to_not_contain_set", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_columns_to_not_contain_set.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_columns_to_not_contain_set.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_columns_to_not_contain_set", + "macro_sql": "{%- test expect_table_columns_to_not_contain_set(model, column_list, transform=\"upper\") -%}\n{%- if execute -%}\n {%- set column_list = column_list | map(transform) | list -%}\n {%- set relation_column_names = dbt_expectations._get_column_list(model, transform) -%}\n {%- set matching_columns = dbt_expectations._list_intersect(column_list, relation_column_names) -%}\n with relation_columns as (\n\n {% for col_name in relation_column_names %}\n select cast('{{ col_name }}' as {{ dbt.type_string() }}) as relation_column\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n ),\n input_columns as (\n\n {% for col_name in column_list %}\n select cast('{{ col_name }}' as {{ dbt.type_string() }}) as input_column\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n )\n -- catch any column in input list that is in the list of table columns\n select *\n from\n input_columns i\n inner join\n relation_columns r on r.relation_column = i.input_column\n\n{%- endif -%}\n{%- endtest -%}", + "depends_on": { + "macros": [ + "macro.dbt_expectations._get_column_list", + "macro.dbt_expectations._list_intersect", + "macro.dbt.type_string" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.634461, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_grouped_row_values_to_have_recent_data": { + "name": "test_expect_grouped_row_values_to_have_recent_data", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_grouped_row_values_to_have_recent_data.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_grouped_row_values_to_have_recent_data.sql", + "unique_id": "macro.dbt_expectations.test_expect_grouped_row_values_to_have_recent_data", + "macro_sql": "{% test expect_grouped_row_values_to_have_recent_data(model,\n group_by,\n timestamp_column,\n datepart,\n interval,\n row_condition=None) %}\n\n {{ adapter.dispatch('test_expect_grouped_row_values_to_have_recent_data', 'dbt_expectations') (model,\n group_by,\n timestamp_column,\n datepart,\n interval,\n row_condition) }}\n\n{% endtest %}", + "depends_on": { + "macros": [ + 
"macro.dbt_expectations.default__test_expect_grouped_row_values_to_have_recent_data" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.637147, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_grouped_row_values_to_have_recent_data": { + "name": "default__test_expect_grouped_row_values_to_have_recent_data", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_grouped_row_values_to_have_recent_data.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_grouped_row_values_to_have_recent_data.sql", + "unique_id": "macro.dbt_expectations.default__test_expect_grouped_row_values_to_have_recent_data", + "macro_sql": "{% macro default__test_expect_grouped_row_values_to_have_recent_data(model,\n group_by,\n timestamp_column,\n datepart,\n interval,\n row_condition) %}\nwith latest_grouped_timestamps as (\n\n select\n {{ group_by | join(\",\") ~ \",\" if group_by }}\n max(1) as join_key,\n max(cast({{ timestamp_column }} as {{ dbt_expectations.type_timestamp() }})) as latest_timestamp_column\n from\n {{ model }}\n where\n -- to exclude erroneous future dates\n cast({{ timestamp_column }} as {{ dbt_expectations.type_timestamp() }}) <= {{ dbt_date.now() }}\n {% if row_condition %}\n and {{ row_condition }}\n {% endif %}\n\n {% if group_by -%}\n {{ dbt_expectations.group_by(group_by | length) }}\n {%- endif %}\n),\ntotal_row_counts as (\n\n select\n {{ group_by | join(\",\") ~ \",\" if group_by }}\n max(1) as join_key,\n count(*) as row_count\n from\n latest_grouped_timestamps\n {% if group_by -%}\n {{ dbt_expectations.group_by(group_by | length) }}\n {%- endif %}\n\n\n),\noutdated_grouped_timestamps as (\n\n select *\n from\n latest_grouped_timestamps\n where\n -- are the max timestamps per group older than the specified cutoff?\n latest_timestamp_column <\n cast(\n {{ dbt.dateadd(datepart, interval * -1, dbt_date.now()) }}\n as {{ dbt_expectations.type_timestamp() }}\n )\n\n),\nvalidation_errors as (\n\n select\n r.row_count,\n t.*\n from\n total_row_counts r\n left join\n outdated_grouped_timestamps t\n on\n {% for g in group_by %}\n r.{{ g }} = t.{{ g }} and\n {% endfor %}\n r.join_key = t.join_key\n where\n -- fail if either no rows were returned due to row_condition,\n -- or the recency test returned failed rows\n r.row_count = 0\n or\n t.join_key is not null\n\n)\nselect * from validation_errors\n{% endmacro %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.type_timestamp", + "macro.dbt_date.now", + "macro.dbt_expectations.group_by", + "macro.dbt.dateadd" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6383328, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_to_exist": { + "name": "test_expect_column_to_exist", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_column_to_exist.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_column_to_exist.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_to_exist", + "macro_sql": "{%- test expect_column_to_exist(model, column_name, column_index=None, transform=\"upper\") -%}\n{%- if execute -%}\n\n {%- set column_name = column_name | map(transform) | join -%}\n {%- set relation_column_names = 
dbt_expectations._get_column_list(model, transform) -%}\n\n {%- set matching_column_index = relation_column_names.index(column_name) if column_name in relation_column_names else -1 %}\n\n {%- if column_index -%}\n\n {%- set column_index_0 = column_index - 1 if column_index > 0 else 0 -%}\n\n {%- set column_index_matches = true if matching_column_index == column_index_0 else false %}\n\n {%- else -%}\n\n {%- set column_index_matches = true -%}\n\n {%- endif %}\n\n with test_data as (\n\n select\n cast('{{ column_name }}' as {{ dbt.type_string() }}) as column_name,\n {{ matching_column_index }} as matching_column_index,\n {{ column_index_matches }} as column_index_matches\n\n )\n select *\n from test_data\n where\n not(matching_column_index >= 0 and column_index_matches)\n\n{%- endif -%}\n{%- endtest -%}", + "depends_on": { + "macros": [ + "macro.dbt_expectations._get_column_list", + "macro.dbt.type_string" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.639727, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_row_count_to_equal": { + "name": "test_expect_table_row_count_to_equal", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_row_count_to_equal", + "macro_sql": "{%- test expect_table_row_count_to_equal(model,\n value,\n group_by=None,\n row_condition=None\n ) -%}\n {{ adapter.dispatch('test_expect_table_row_count_to_equal',\n 'dbt_expectations') (model,\n value,\n group_by,\n row_condition\n ) }}\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.default__test_expect_table_row_count_to_equal" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.640508, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_table_row_count_to_equal": { + "name": "default__test_expect_table_row_count_to_equal", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal.sql", + "unique_id": "macro.dbt_expectations.default__test_expect_table_row_count_to_equal", + "macro_sql": "\n\n\n\n{%- macro default__test_expect_table_row_count_to_equal(model,\n value,\n group_by,\n row_condition\n ) -%}\n{% set expression %}\ncount(*) = {{ value }}\n{% endset %}\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=group_by,\n row_condition=row_condition)\n }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.640842, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_row_count_to_be_between": { + "name": "test_expect_table_row_count_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_row_count_to_be_between.sql", + "original_file_path": 
"macros/schema_tests/table_shape/expect_table_row_count_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_row_count_to_be_between", + "macro_sql": "{%- test expect_table_row_count_to_be_between(model,\n min_value=None,\n max_value=None,\n group_by=None,\n row_condition=None,\n strictly=False\n ) -%}\n {{ adapter.dispatch('test_expect_table_row_count_to_be_between',\n 'dbt_expectations') (model,\n min_value,\n max_value,\n group_by,\n row_condition,\n strictly\n ) }}\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.default__test_expect_table_row_count_to_be_between" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.641829, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_table_row_count_to_be_between": { + "name": "default__test_expect_table_row_count_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_row_count_to_be_between.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_row_count_to_be_between.sql", + "unique_id": "macro.dbt_expectations.default__test_expect_table_row_count_to_be_between", + "macro_sql": "\n\n{%- macro default__test_expect_table_row_count_to_be_between(model,\n min_value,\n max_value,\n group_by,\n row_condition,\n strictly\n ) -%}\n{% set expression %}\ncount(*)\n{% endset %}\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=group_by,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n{%- endmacro -%}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_between"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6422038, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_row_count_to_equal_other_table_times_factor": { + "name": "test_expect_table_row_count_to_equal_other_table_times_factor", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal_other_table_times_factor.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal_other_table_times_factor.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_row_count_to_equal_other_table_times_factor", + "macro_sql": "{%- test expect_table_row_count_to_equal_other_table_times_factor(model,\n compare_model,\n factor,\n group_by=None,\n compare_group_by=None,\n row_condition=None,\n compare_row_condition=None\n ) -%}\n {{ adapter.dispatch('test_expect_table_row_count_to_equal_other_table_times_factor',\n 'dbt_expectations') (model,\n compare_model,\n factor,\n group_by,\n compare_group_by,\n row_condition,\n compare_row_condition\n ) }}\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.default__test_expect_table_row_count_to_equal_other_table_times_factor" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.643451, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_table_row_count_to_equal_other_table_times_factor": { + "name": 
"default__test_expect_table_row_count_to_equal_other_table_times_factor", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal_other_table_times_factor.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_row_count_to_equal_other_table_times_factor.sql", + "unique_id": "macro.dbt_expectations.default__test_expect_table_row_count_to_equal_other_table_times_factor", + "macro_sql": "\n\n{%- macro default__test_expect_table_row_count_to_equal_other_table_times_factor(model,\n compare_model,\n factor,\n group_by,\n compare_group_by,\n row_condition,\n compare_row_condition\n ) -%}\n\n{{ dbt_expectations.test_expect_table_row_count_to_equal_other_table(model,\n compare_model,\n group_by=group_by,\n compare_group_by=compare_group_by,\n factor=factor,\n row_condition=row_condition,\n compare_row_condition=compare_row_condition\n) }}\n{%- endmacro -%}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.test_expect_table_row_count_to_equal_other_table" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6437912, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_columns_to_match_set": { + "name": "test_expect_table_columns_to_match_set", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_columns_to_match_set.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_columns_to_match_set.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_columns_to_match_set", + "macro_sql": "{%- test expect_table_columns_to_match_set(model, column_list, transform=\"upper\") -%}\n{%- if execute -%}\n {%- set column_list = column_list | map(transform) | list -%}\n {%- set relation_column_names = dbt_expectations._get_column_list(model, transform) -%}\n {%- set matching_columns = dbt_expectations._list_intersect(column_list, relation_column_names) -%}\n with relation_columns as (\n\n {% for col_name in relation_column_names %}\n select cast('{{ col_name }}' as {{ dbt.type_string() }}) as relation_column\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n ),\n input_columns as (\n\n {% for col_name in column_list %}\n select cast('{{ col_name }}' as {{ dbt.type_string() }}) as input_column\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n )\n select *\n from\n relation_columns r\n full outer join\n input_columns i on r.relation_column = i.input_column\n where\n -- catch any column in input list that is not in the list of table columns\n -- or any table column that is not in the input list\n r.relation_column is null or\n i.input_column is null\n\n{%- endif -%}\n{%- endtest -%}", + "depends_on": { + "macros": [ + "macro.dbt_expectations._get_column_list", + "macro.dbt_expectations._list_intersect", + "macro.dbt.type_string" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.645078, + "supported_languages": null + }, + "macro.dbt_expectations._get_column_list": { + "name": "_get_column_list", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/_get_column_list.sql", + "original_file_path": "macros/schema_tests/table_shape/_get_column_list.sql", + "unique_id": 
"macro.dbt_expectations._get_column_list", + "macro_sql": "{%- macro _get_column_list(model, transform=\"upper\") -%}\n{%- set relation_columns = adapter.get_columns_in_relation(model) -%}\n{%- set relation_column_names = relation_columns | map(attribute=\"name\") | map(transform) | list -%}\n{%- do return(relation_column_names) -%}\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.645479, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_columns_to_match_ordered_list": { + "name": "test_expect_table_columns_to_match_ordered_list", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_columns_to_match_ordered_list.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_columns_to_match_ordered_list.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_columns_to_match_ordered_list", + "macro_sql": "{%- test expect_table_columns_to_match_ordered_list(model, column_list, transform=\"upper\") -%}\n{%- if execute -%}\n {%- set column_list = column_list | map(transform) | list -%}\n {%- set relation_column_names = dbt_expectations._get_column_list(model, transform) -%}\n {%- set matching_columns = dbt_expectations._list_intersect(column_list, relation_column_names) -%}\n with relation_columns as (\n\n {% for col_name in relation_column_names %}\n select\n {{ loop.index }} as relation_column_idx,\n cast('{{ col_name }}' as {{ dbt.type_string() }}) as relation_column\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n ),\n input_columns as (\n\n {% for col_name in column_list %}\n select\n {{ loop.index }} as input_column_idx,\n cast('{{ col_name }}' as {{ dbt.type_string() }}) as input_column\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n )\n select *\n from\n relation_columns r\n full outer join\n input_columns i on r.relation_column = i.input_column and r.relation_column_idx = i.input_column_idx\n where\n -- catch any column in input list that is not in the sequence of table columns\n -- or any table column that is not in the input sequence\n r.relation_column is null or\n i.input_column is null\n\n{%- endif -%}\n{%- endtest -%}", + "depends_on": { + "macros": [ + "macro.dbt_expectations._get_column_list", + "macro.dbt_expectations._list_intersect", + "macro.dbt.type_string" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.647084, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_aggregation_to_equal_other_table": { + "name": "test_expect_table_aggregation_to_equal_other_table", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_aggregation_to_equal_other_table.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_aggregation_to_equal_other_table.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_aggregation_to_equal_other_table", + "macro_sql": "{%- test expect_table_aggregation_to_equal_other_table(model,\n expression,\n compare_model,\n compare_expression=None,\n group_by=None,\n compare_group_by=None,\n row_condition=None,\n compare_row_condition=None,\n tolerance=0.0,\n tolerance_percent=None\n ) -%}\n\n\n{{ 
dbt_expectations.test_equal_expression(\n model,\n expression=expression,\n compare_model=compare_model,\n compare_expression=compare_expression,\n group_by=group_by,\n compare_group_by=compare_group_by,\n row_condition=row_condition,\n compare_row_condition=compare_row_condition,\n tolerance=tolerance,\n tolerance_percent=tolerance_percent\n) }}\n\n{%- endtest -%}", + "depends_on": { + "macros": ["macro.dbt_expectations.test_equal_expression"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.647873, + "supported_languages": null + }, + "macro.dbt_expectations._list_intersect": { + "name": "_list_intersect", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/_list_intersect.sql", + "original_file_path": "macros/schema_tests/table_shape/_list_intersect.sql", + "unique_id": "macro.dbt_expectations._list_intersect", + "macro_sql": "{%- macro _list_intersect(list1, list2) -%}\n{%- set matching_items = [] -%}\n{%- for itm in list1 -%}\n {%- if itm in list2 -%}\n {%- do matching_items.append(itm) -%}\n {%- endif -%}\n{%- endfor -%}\n{%- do return(matching_items) -%}\n{%- endmacro -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.648337, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_column_count_to_equal_other_table": { + "name": "test_expect_table_column_count_to_equal_other_table", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_column_count_to_equal_other_table.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_column_count_to_equal_other_table.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_column_count_to_equal_other_table", + "macro_sql": "{%- test expect_table_column_count_to_equal_other_table(model, compare_model) -%}\n{%- if execute -%}\n{%- set number_columns = (adapter.get_columns_in_relation(model) | length) -%}\n{%- set compare_number_columns = (adapter.get_columns_in_relation(compare_model) | length) -%}\nwith test_data as (\n\n select\n {{ number_columns }} as number_columns,\n {{ compare_number_columns }} as compare_number_columns\n\n)\nselect *\nfrom test_data\nwhere\n number_columns != compare_number_columns\n{%- endif -%}\n{%- endtest -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.648858, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_column_count_to_equal": { + "name": "test_expect_table_column_count_to_equal", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_column_count_to_equal.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_column_count_to_equal.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_column_count_to_equal", + "macro_sql": "{%- test expect_table_column_count_to_equal(model, value) -%}\n{%- if execute -%}\n{%- set number_actual_columns = (adapter.get_columns_in_relation(model) | length) -%}\nwith test_data as (\n\n select\n {{ number_actual_columns }} as number_actual_columns,\n {{ value }} as value\n\n)\nselect *\nfrom 
test_data\nwhere\n number_actual_columns != value\n{%- endif -%}\n{%- endtest -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.649274, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_table_column_count_to_be_between": { + "name": "test_expect_table_column_count_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/table_shape/expect_table_column_count_to_be_between.sql", + "original_file_path": "macros/schema_tests/table_shape/expect_table_column_count_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_table_column_count_to_be_between", + "macro_sql": "{%- test expect_table_column_count_to_be_between(model,\n min_value=None,\n max_value=None\n ) -%}\n{%- if min_value is none and max_value is none -%}\n{{ exceptions.raise_compiler_error(\n \"You have to provide either a min_value, max_value or both.\"\n) }}\n{%- endif -%}\n{%- if execute -%}\n{%- set number_actual_columns = (adapter.get_columns_in_relation(model) | length) -%}\n\n{%- set expression %}\n( 1=1\n{%- if min_value %} and number_actual_columns >= min_value{% endif %}\n{%- if max_value %} and number_actual_columns <= max_value{% endif %}\n)\n{% endset -%}\n\nwith test_data as (\n\n select\n {{ number_actual_columns }} as number_actual_columns,\n {{ min_value if min_value else 0 }} as min_value,\n {{ max_value if max_value else 0 }} as max_value\n\n)\nselect *\nfrom test_data\nwhere\n not {{ expression }}\n{%- endif -%}\n{%- endtest -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6504278, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_not_be_in_set": { + "name": "test_expect_column_values_to_not_be_in_set", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_not_be_in_set.sql", + "original_file_path": "macros/schema_tests/column_values_basic/expect_column_values_to_not_be_in_set.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_not_be_in_set", + "macro_sql": "{% test expect_column_values_to_not_be_in_set(model, column_name,\n value_set,\n quote_values=True,\n row_condition=None\n ) %}\n\nwith all_values as (\n\n select\n {{ column_name }} as value_field\n\n from {{ model }}\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n\n),\nset_values as (\n\n {% for value in value_set -%}\n select\n {% if quote_values -%}\n cast('{{ value }}' as {{ dbt.type_string() }})\n {%- else -%}\n {{ value }}\n {%- endif %} as value_field\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n),\nvalidation_errors as (\n -- values from the model that match the set\n select\n v.value_field\n from\n all_values v\n join\n set_values s on v.value_field = s.value_field\n\n)\n\nselect *\nfrom validation_errors\n\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt.type_string"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.651364, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_be_in_set": { + "name": "test_expect_column_values_to_be_in_set", + 
"resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_in_set.sql", + "original_file_path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_in_set.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_be_in_set", + "macro_sql": "{% test expect_column_values_to_be_in_set(model, column_name,\n value_set,\n quote_values=True,\n row_condition=None\n ) %}\n\nwith all_values as (\n\n select\n {{ column_name }} as value_field\n\n from {{ model }}\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n\n),\nset_values as (\n\n {% for value in value_set -%}\n select\n {% if quote_values -%}\n cast('{{ value }}' as {{ dbt.type_string() }})\n {%- else -%}\n {{ value }}\n {%- endif %} as value_field\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n),\nvalidation_errors as (\n -- values from the model that are not in the set\n select\n v.value_field\n from\n all_values v\n left join\n set_values s on v.value_field = s.value_field\n where\n s.value_field is null\n\n)\n\nselect *\nfrom validation_errors\n\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt.type_string"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.652309, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_be_increasing": { + "name": "test_expect_column_values_to_be_increasing", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_increasing.sql", + "original_file_path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_increasing.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_be_increasing", + "macro_sql": "{% test expect_column_values_to_be_increasing(model, column_name,\n sort_column=None,\n strictly=True,\n row_condition=None,\n group_by=None) %}\n\n{%- set sort_column = column_name if not sort_column else sort_column -%}\n{%- set operator = \">\" if strictly else \">=\" -%}\nwith all_values as (\n\n select\n {{ sort_column }} as sort_column,\n {%- if group_by -%}\n {{ group_by | join(\", \") }},\n {%- endif %}\n {{ column_name }} as value_field\n from {{ model }}\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n\n),\nadd_lag_values as (\n\n select\n sort_column,\n {%- if group_by -%}\n {{ group_by | join(\", \") }},\n {%- endif %}\n value_field,\n lag(value_field) over\n {%- if not group_by -%}\n (order by sort_column)\n {%- else -%}\n (partition by {{ group_by | join(\", \") }} order by sort_column)\n {%- endif %} as value_field_lag\n from\n all_values\n\n),\nvalidation_errors as (\n select\n *\n from\n add_lag_values\n where\n value_field_lag is not null\n and\n not (value_field {{ operator }} value_field_lag)\n\n)\nselect *\nfrom validation_errors\n{% endtest %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.653839, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_be_null": { + "name": "test_expect_column_values_to_be_null", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_null.sql", + "original_file_path": 
"macros/schema_tests/column_values_basic/expect_column_values_to_be_null.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_be_null", + "macro_sql": "{% test expect_column_values_to_be_null(model, column_name, row_condition=None) %}\n\n{% set expression = column_name ~ \" is null\" %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.654352, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_be_unique": { + "name": "test_expect_column_values_to_be_unique", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_unique.sql", + "original_file_path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_unique.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_be_unique", + "macro_sql": "{% test expect_column_values_to_be_unique(model, column_name, row_condition=None) %}\n{{ dbt_expectations.test_expect_compound_columns_to_be_unique(model, [column_name], row_condition=row_condition) }}\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.test_expect_compound_columns_to_be_unique" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.654644, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_be_between": { + "name": "test_expect_column_values_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_between.sql", + "original_file_path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_be_between", + "macro_sql": "{% test expect_column_values_to_be_between(model, column_name,\n min_value=None,\n max_value=None,\n row_condition=None,\n strictly=False\n ) %}\n\n{% set expression %}\n{{ column_name }}\n{% endset %}\n\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=None,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n\n\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_between"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.655492, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_be_decreasing": { + "name": "test_expect_column_values_to_be_decreasing", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_decreasing.sql", + "original_file_path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_decreasing.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_be_decreasing", + "macro_sql": "{% test expect_column_values_to_be_decreasing(model, column_name,\n sort_column=None,\n strictly=True,\n row_condition=None,\n 
group_by=None) %}\n\n{%- set sort_column = column_name if not sort_column else sort_column -%}\n{%- set operator = \"<\" if strictly else \"<=\" %}\nwith all_values as (\n\n select\n {{ sort_column }} as sort_column,\n {%- if group_by -%}\n {{ group_by | join(\", \") }},\n {%- endif %}\n {{ column_name }} as value_field\n from {{ model }}\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n\n),\nadd_lag_values as (\n\n select\n sort_column,\n value_field,\n lag(value_field) over\n {%- if not group_by -%}\n (order by sort_column)\n {%- else -%}\n (partition by {{ group_by | join(\", \") }} order by sort_column)\n {%- endif %} as value_field_lag\n from\n all_values\n\n),\nvalidation_errors as (\n\n select\n *\n from\n add_lag_values\n where\n value_field_lag is not null\n and\n not (value_field {{ operator }} value_field_lag)\n\n)\nselect *\nfrom validation_errors\n{% endtest %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.656951, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_be_in_type_list": { + "name": "test_expect_column_values_to_be_in_type_list", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_in_type_list.sql", + "original_file_path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_in_type_list.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_be_in_type_list", + "macro_sql": "{%- test expect_column_values_to_be_in_type_list(model, column_name, column_type_list) -%}\n{%- if execute -%}\n\n {%- set column_name = column_name | upper -%}\n {%- set columns_in_relation = adapter.get_columns_in_relation(model) -%}\n {%- set column_type_list = column_type_list| map(\"upper\") | list -%}\n with relation_columns as (\n\n {% for column in columns_in_relation %}\n select\n cast('{{ escape_single_quotes(column.name | upper) }}' as {{ dbt.type_string() }}) as relation_column,\n cast('{{ column.dtype | upper }}' as {{ dbt.type_string() }}) as relation_column_type\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n ),\n test_data as (\n\n select\n *\n from\n relation_columns\n where\n relation_column = '{{ column_name }}'\n and\n relation_column_type not in ('{{ column_type_list | join(\"', '\") }}')\n\n )\n select *\n from test_data\n\n{%- endif -%}\n{%- endtest -%}", + "depends_on": { + "macros": ["macro.dbt.escape_single_quotes", "macro.dbt.type_string"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.658247, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_be_of_type": { + "name": "test_expect_column_values_to_be_of_type", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_of_type.sql", + "original_file_path": "macros/schema_tests/column_values_basic/expect_column_values_to_be_of_type.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_be_of_type", + "macro_sql": "{%- test expect_column_values_to_be_of_type(model, column_name, column_type) -%}\n{{ dbt_expectations.test_expect_column_values_to_be_in_type_list(model, column_name, [column_type]) }}\n{%- endtest -%}", + "depends_on": { + 
"macros": [ + "macro.dbt_expectations.test_expect_column_values_to_be_in_type_list" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.658491, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_have_consistent_casing": { + "name": "test_expect_column_values_to_have_consistent_casing", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_have_consistent_casing.sql", + "original_file_path": "macros/schema_tests/column_values_basic/expect_column_values_to_have_consistent_casing.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_have_consistent_casing", + "macro_sql": "{% test expect_column_values_to_have_consistent_casing(model, column_name, display_inconsistent_columns=False) %}\n\nwith test_data as (\n\n select\n distinct {{ column_name }} as distinct_values\n from\n {{ model }}\n\n ),\n {% if display_inconsistent_columns %}\n validation_errors as (\n\n select\n lower(distinct_values) as inconsistent_columns,\n count(distinct_values) as set_count_case_insensitive\n from\n test_data\n group by 1\n having\n count(distinct_values) > 1\n\n )\n select * from validation_errors\n {% else %}\n validation_errors as (\n\n select\n count(1) as set_count,\n count(distinct lower(distinct_values)) as set_count_case_insensitive\n from\n test_data\n\n )\n select *\n from\n validation_errors\n where\n set_count != set_count_case_insensitive\n {% endif %}\n {%- endtest -%}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.658922, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_not_be_null": { + "name": "test_expect_column_values_to_not_be_null", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/column_values_basic/expect_column_values_to_not_be_null.sql", + "original_file_path": "macros/schema_tests/column_values_basic/expect_column_values_to_not_be_null.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_not_be_null", + "macro_sql": "{% test expect_column_values_to_not_be_null(model, column_name, row_condition=None) %}\n\n{% set expression = column_name ~ \" is not null\" %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6593971, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_min_to_be_between": { + "name": "test_expect_column_min_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_min_to_be_between.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_min_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_min_to_be_between", + "macro_sql": "{% test expect_column_min_to_be_between(model, column_name,\n min_value=None,\n max_value=None,\n group_by=None,\n row_condition=None,\n strictly=False\n ) 
%}\n{% set expression %}\nmin({{ column_name }})\n{% endset %}\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=group_by,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_between"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.66022, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_unique_value_count_to_be_between": { + "name": "test_expect_column_unique_value_count_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_unique_value_count_to_be_between.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_unique_value_count_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_unique_value_count_to_be_between", + "macro_sql": "{% test expect_column_unique_value_count_to_be_between(model, column_name,\n min_value=None,\n max_value=None,\n group_by=None,\n row_condition=None,\n strictly=False\n ) %}\n{% set expression %}\ncount(distinct {{ column_name }})\n{% endset %}\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=group_by,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_between"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.66106, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_quantile_values_to_be_between": { + "name": "test_expect_column_quantile_values_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_quantile_values_to_be_between.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_quantile_values_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_quantile_values_to_be_between", + "macro_sql": "{% test expect_column_quantile_values_to_be_between(model, column_name,\n quantile,\n min_value=None,\n max_value=None,\n group_by=None,\n row_condition=None,\n strictly=False\n ) %}\n\n{% set expression %}\n{{ dbt_expectations.percentile_cont(column_name, quantile) }}\n{% endset %}\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=group_by,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.percentile_cont", + "macro.dbt_expectations.expression_between" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.661966, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_median_to_be_between": { + "name": "test_expect_column_median_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_median_to_be_between.sql", + "original_file_path": 
"macros/schema_tests/aggregate_functions/expect_column_median_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_median_to_be_between", + "macro_sql": "{% test expect_column_median_to_be_between(model, column_name,\n min_value=None,\n max_value=None,\n group_by=None,\n row_condition=None,\n strictly=False\n ) %}\n\n{% set expression %}\n{{ dbt_expectations.median(column_name) }}\n{% endset %}\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=group_by,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n{% endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.median", + "macro.dbt_expectations.expression_between" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.662826, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_proportion_of_unique_values_to_be_between": { + "name": "test_expect_column_proportion_of_unique_values_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_proportion_of_unique_values_to_be_between.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_proportion_of_unique_values_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_proportion_of_unique_values_to_be_between", + "macro_sql": "{% test expect_column_proportion_of_unique_values_to_be_between(model, column_name,\n min_value=None,\n max_value=None,\n group_by=None,\n row_condition=None,\n strictly=False\n ) %}\n{% set expression %}\ncount(distinct {{ column_name }})*1.0/count({{ column_name }})\n{% endset %}\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=group_by,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_between"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.66376, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_distinct_values_to_equal_set": { + "name": "test_expect_column_distinct_values_to_equal_set", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_distinct_values_to_equal_set.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_distinct_values_to_equal_set.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_distinct_values_to_equal_set", + "macro_sql": "{% test expect_column_distinct_values_to_equal_set(model, column_name,\n value_set,\n quote_values=True,\n row_condition=None\n ) %}\n\nwith all_values as (\n\n select distinct\n {{ column_name }} as column_value\n\n from {{ model }}\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n\n),\nset_values as (\n\n {% for value in value_set -%}\n select\n {% if quote_values -%}\n '{{ value }}'\n {%- else -%}\n {{ value }}\n {%- endif %} as value_field\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n\n),\nunique_set_values as (\n\n select distinct value_field\n from\n set_values\n\n),\nvalidation_errors as (\n\n select\n *\n from\n all_values v\n full outer join\n 
unique_set_values s on v.column_value = s.value_field\n where\n v.column_value is null or\n s.value_field is null\n\n)\n\nselect *\nfrom validation_errors\n\n{% endtest %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.664706, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_most_common_value_to_be_in_set": { + "name": "test_expect_column_most_common_value_to_be_in_set", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_most_common_value_to_be_in_set.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_most_common_value_to_be_in_set.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_most_common_value_to_be_in_set", + "macro_sql": "{% test expect_column_most_common_value_to_be_in_set(model,\n column_name,\n value_set,\n top_n,\n quote_values=True,\n data_type=\"decimal\",\n row_condition=None\n ) -%}\n\n {{ adapter.dispatch('test_expect_column_most_common_value_to_be_in_set', 'dbt_expectations') (\n model, column_name, value_set, top_n, quote_values, data_type, row_condition\n ) }}\n\n{%- endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.default__test_expect_column_most_common_value_to_be_in_set" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6666088, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_column_most_common_value_to_be_in_set": { + "name": "default__test_expect_column_most_common_value_to_be_in_set", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_most_common_value_to_be_in_set.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_most_common_value_to_be_in_set.sql", + "unique_id": "macro.dbt_expectations.default__test_expect_column_most_common_value_to_be_in_set", + "macro_sql": "{% macro default__test_expect_column_most_common_value_to_be_in_set(model,\n column_name,\n value_set,\n top_n,\n quote_values,\n data_type,\n row_condition\n ) %}\n\nwith value_counts as (\n\n select\n {% if quote_values -%}\n {{ column_name }}\n {%- else -%}\n cast({{ column_name }} as {{ data_type }})\n {%- endif %} as value_field,\n count(*) as value_count\n\n from {{ model }}\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n\n group by {% if quote_values -%}\n {{ column_name }}\n {%- else -%}\n cast({{ column_name }} as {{ data_type }})\n {%- endif %}\n\n),\nvalue_counts_ranked as (\n\n select\n *,\n row_number() over(order by value_count desc) as value_count_rank\n from\n value_counts\n\n),\nvalue_count_top_n as (\n\n select\n value_field\n from\n value_counts_ranked\n where\n value_count_rank = {{ top_n }}\n\n),\nset_values as (\n\n {% for value in value_set -%}\n select\n {% if quote_values -%}\n '{{ value }}'\n {%- else -%}\n cast({{ value }} as {{ data_type }})\n {%- endif %} as value_field\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n\n),\nunique_set_values as (\n\n select distinct value_field\n from\n set_values\n\n),\nvalidation_errors as (\n -- values from the model that are not in the set\n select\n value_field\n from\n value_count_top_n\n where\n value_field not in (select value_field 
from unique_set_values)\n\n)\n\nselect *\nfrom validation_errors\n\n{% endmacro %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.667686, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_distinct_values_to_contain_set": { + "name": "test_expect_column_distinct_values_to_contain_set", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_distinct_values_to_contain_set.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_distinct_values_to_contain_set.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_distinct_values_to_contain_set", + "macro_sql": "{% test expect_column_distinct_values_to_contain_set(model, column_name,\n value_set,\n quote_values=True,\n row_condition=None\n ) %}\n\nwith all_values as (\n\n select distinct\n {{ column_name }} as value_field\n\n from {{ model }}\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n\n),\nset_values as (\n\n {% for value in value_set -%}\n select\n {% if quote_values -%}\n '{{ value }}'\n {%- else -%}\n {{ value }}\n {%- endif %} as value_field\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n\n),\nunique_set_values as (\n\n select distinct value_field\n from\n set_values\n\n),\nvalidation_errors as (\n -- values in set that are not in the list of values from the model\n select\n s.value_field\n from\n unique_set_values s\n left join\n all_values v on s.value_field = v.value_field\n where\n v.value_field is null\n\n)\n\nselect *\nfrom validation_errors\n\n{% endtest %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.66875, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_distinct_count_to_equal": { + "name": "test_expect_column_distinct_count_to_equal", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_distinct_count_to_equal.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_distinct_count_to_equal.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_distinct_count_to_equal", + "macro_sql": "{% test expect_column_distinct_count_to_equal(model,\n column_name,\n value,\n group_by=None,\n row_condition=None\n ) %}\n{% set expression %}\ncount(distinct {{ column_name }}) = {{ value }}\n{% endset %}\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=group_by,\n row_condition=row_condition)\n }}\n{%- endtest -%}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.669418, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_distinct_count_to_be_less_than": { + "name": "test_expect_column_distinct_count_to_be_less_than", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_distinct_count_to_be_less_than.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_distinct_count_to_be_less_than.sql", + 
"unique_id": "macro.dbt_expectations.test_expect_column_distinct_count_to_be_less_than", + "macro_sql": "{% test expect_column_distinct_count_to_be_less_than(model,\n column_name,\n value,\n group_by=None,\n row_condition=None\n ) %}\n{% set expression %}\ncount(distinct {{ column_name }}) < {{ value }}\n{% endset %}\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=group_by,\n row_condition=row_condition)\n }}\n{%- endtest -%}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6700742, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_sum_to_be_between": { + "name": "test_expect_column_sum_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_sum_to_be_between.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_sum_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_sum_to_be_between", + "macro_sql": "{% test expect_column_sum_to_be_between(model, column_name,\n min_value=None,\n max_value=None,\n group_by=None,\n row_condition=None,\n strictly=False\n ) %}\n{% set expression %}\nsum({{ column_name }})\n{% endset %}\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=group_by,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_between"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.671083, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_stdev_to_be_between": { + "name": "test_expect_column_stdev_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_stdev_to_be_between.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_stdev_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_stdev_to_be_between", + "macro_sql": "{% test expect_column_stdev_to_be_between(model, column_name,\n min_value=None,\n max_value=None,\n group_by=None,\n row_condition=None,\n strictly=False\n ) -%}\n {{ adapter.dispatch('test_expect_column_stdev_to_be_between', 'dbt_expectations') (\n model, column_name,\n min_value,\n max_value,\n group_by,\n row_condition,\n strictly\n ) }}\n{%- endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.default__test_expect_column_stdev_to_be_between" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.672696, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_column_stdev_to_be_between": { + "name": "default__test_expect_column_stdev_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_stdev_to_be_between.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_stdev_to_be_between.sql", + "unique_id": 
"macro.dbt_expectations.default__test_expect_column_stdev_to_be_between", + "macro_sql": "{% macro default__test_expect_column_stdev_to_be_between(\n model, column_name,\n min_value,\n max_value,\n group_by,\n row_condition,\n strictly\n ) %}\n\n{% set expression %}\nstddev({{ column_name }})\n{% endset %}\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=group_by,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n{% endmacro %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_between"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.673132, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_mean_to_be_between": { + "name": "test_expect_column_mean_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_mean_to_be_between.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_mean_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_mean_to_be_between", + "macro_sql": "{% test expect_column_mean_to_be_between(model, column_name,\n min_value=None,\n max_value=None,\n group_by=None,\n row_condition=None,\n strictly=False\n ) %}\n{% set expression %}\navg({{ column_name }})\n{% endset %}\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=group_by,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_between"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.673994, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_max_to_be_between": { + "name": "test_expect_column_max_to_be_between", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_max_to_be_between.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_max_to_be_between.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_max_to_be_between", + "macro_sql": "{% test expect_column_max_to_be_between(model, column_name,\n min_value=None,\n max_value=None,\n group_by=None,\n row_condition=None,\n strictly=False\n ) %}\n{% set expression %}\nmax({{ column_name }})\n{% endset %}\n{{ dbt_expectations.expression_between(model,\n expression=expression,\n min_value=min_value,\n max_value=max_value,\n group_by_columns=group_by,\n row_condition=row_condition,\n strictly=strictly\n ) }}\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_between"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.674836, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_distinct_count_to_be_greater_than": { + "name": "test_expect_column_distinct_count_to_be_greater_than", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_distinct_count_to_be_greater_than.sql", + "original_file_path": 
"macros/schema_tests/aggregate_functions/expect_column_distinct_count_to_be_greater_than.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_distinct_count_to_be_greater_than", + "macro_sql": "{% test expect_column_distinct_count_to_be_greater_than(model,\n column_name,\n value,\n group_by=None,\n row_condition=None\n ) %}\n{% set expression %}\ncount(distinct {{ column_name }}) > {{ value }}\n{% endset %}\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=group_by,\n row_condition=row_condition)\n }}\n{%- endtest -%}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.675485, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_distinct_values_to_be_in_set": { + "name": "test_expect_column_distinct_values_to_be_in_set", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_distinct_values_to_be_in_set.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_distinct_values_to_be_in_set.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_distinct_values_to_be_in_set", + "macro_sql": "{% test expect_column_distinct_values_to_be_in_set(model,\n column_name,\n value_set,\n quote_values=True,\n row_condition=None\n ) %}\n\nwith all_values as (\n\n select distinct\n {{ column_name }} as value_field\n\n from {{ model }}\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n\n),\nset_values as (\n\n {% for value in value_set -%}\n select\n {% if quote_values -%}\n '{{ value }}'\n {%- else -%}\n {{ value }}\n {%- endif %} as value_field\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n\n),\nunique_set_values as (\n\n select distinct value_field\n from\n set_values\n\n),\nvalidation_errors as (\n -- values from the model that are not in the set\n select\n v.value_field\n from\n all_values v\n left join\n unique_set_values s on v.value_field = s.value_field\n where\n s.value_field is null\n\n)\n\nselect *\nfrom validation_errors\n\n{% endtest %}", + "depends_on": { "macros": [] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.676438, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_distinct_count_to_equal_other_table": { + "name": "test_expect_column_distinct_count_to_equal_other_table", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/aggregate_functions/expect_column_distinct_count_to_equal_other_table.sql", + "original_file_path": "macros/schema_tests/aggregate_functions/expect_column_distinct_count_to_equal_other_table.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_distinct_count_to_equal_other_table", + "macro_sql": "{% test expect_column_distinct_count_to_equal_other_table(model,\n compare_model,\n column_name,\n compare_column_name,\n row_condition=None,\n compare_row_condition=None\n ) %}\n{%- set expression -%}\ncount(distinct {{ column_name }})\n{%- endset -%}\n{%- set compare_expression -%}\n{%- if compare_column_name -%}\ncount(distinct {{ compare_column_name }})\n{%- else -%}\n{{ expression }}\n{%- endif -%}\n{%- endset -%}\n{{ dbt_expectations.test_equal_expression(\n model,\n 
expression=expression,\n compare_model=compare_model,\n compare_expression=compare_expression,\n row_condition=row_condition,\n compare_row_condition=compare_row_condition\n) }}\n{%- endtest -%}", + "depends_on": { + "macros": ["macro.dbt_expectations.test_equal_expression"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.677221, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_row_values_to_have_data_for_every_n_datepart": { + "name": "test_expect_row_values_to_have_data_for_every_n_datepart", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/distributional/expect_row_values_to_have_data_for_every_n_datepart.sql", + "original_file_path": "macros/schema_tests/distributional/expect_row_values_to_have_data_for_every_n_datepart.sql", + "unique_id": "macro.dbt_expectations.test_expect_row_values_to_have_data_for_every_n_datepart", + "macro_sql": "{%- test expect_row_values_to_have_data_for_every_n_datepart(model,\n date_col,\n date_part=\"day\",\n interval=None,\n row_condition=None,\n exclusion_condition=None,\n test_start_date=None,\n test_end_date=None) -%}\n{% if not execute %}\n {{ return('') }}\n{% endif %}\n\n{% if not test_start_date or not test_end_date %}\n {% set sql %}\n\n select\n min({{ date_col }}) as start_{{ date_part }},\n max({{ date_col }}) as end_{{ date_part }}\n from {{ model }}\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n\n {% endset %}\n\n {%- set dr = run_query(sql) -%}\n {%- set db_start_date = dr.columns[0].values()[0].strftime('%Y-%m-%d') -%}\n {%- set db_end_date = dr.columns[1].values()[0].strftime('%Y-%m-%d') -%}\n\n{% endif %}\n\n{% if not test_start_date %}\n{% set start_date = db_start_date %}\n{% else %}\n{% set start_date = test_start_date %}\n{% endif %}\n\n\n{% if not test_end_date %}\n{% set end_date = db_end_date %}\n{% else %}\n{% set end_date = test_end_date %}\n{% endif %}\nwith base_dates as (\n\n {{ dbt_date.get_base_dates(start_date=start_date, end_date=end_date, datepart=date_part) }}\n {% if interval %}\n {#\n Filter the date spine created above down to the interval granularity using a modulo operation.\n The number of date_parts after the start_date divided by the integer interval will produce no remainder for the desired intervals,\n e.g. for 2-day interval from a starting Jan 1, 2020:\n params: start_date = '2020-01-01', date_part = 'day', interval = 2\n date spine created above: [2020-01-01, 2020-01-02, 2020-01-03, 2020-01-04, 2020-01-05, ...]\n The first parameter to the `mod` function would be the number of days between the start_date and the spine date, i.e. [0, 1, 2, 3, 4 ...]\n The second parameter to the `mod` function would be the integer interval, i.e. 2\n This modulo operation produces the following remainders: [0, 1, 0, 1, 0, ...]\n Filtering the spine only where this remainder == 0 will return a spine with every other day as desired, i.e. 
[2020-01-01, 2020-01-03, 2020-01-05, ...]\n #}\n where mod(\n cast({{ dbt.datediff(\"'\" ~ start_date ~ \"'\", 'date_' ~ date_part, date_part) }} as {{ dbt.type_int() }}),\n cast({{interval}} as {{ dbt.type_int() }})\n ) = 0\n {% endif %}\n\n),\nmodel_data as (\n\n select\n {% if not interval %}\n\n cast({{ dbt.date_trunc(date_part, date_col) }} as {{ dbt_expectations.type_datetime() }}) as date_{{ date_part }},\n\n {% else %}\n {#\n Use a modulo operator to determine the number of intervals that a date_col is away from the interval-date spine\n and subtracts that amount to effectively slice each date_col record into its corresponding spine bucket,\n e.g. given a date_col of with records [2020-01-01, 2020-01-02, 2020-01-03, 2020-01-11, 2020-01-12]\n if we want to slice these dates into their 2-day buckets starting Jan 1, 2020 (start_date = '2020-01-01', date_part='day', interval=2),\n the modulo operation described above will produce these remainders: [0, 1, 0, 0, 1]\n subtracting that number of days from the observations will produce records [2020-01-01, 2020-01-01, 2020-01-03, 2020-01-11, 2020-01-11],\n all of which align with records from the interval-date spine\n #}\n {{ dbt.dateadd(\n date_part,\n \"mod(\n cast(\" ~ dbt.datediff(\"'\" ~ start_date ~ \"'\", date_col, date_part) ~ \" as \" ~ dbt.type_int() ~ \" ),\n cast(\" ~ interval ~ \" as \" ~ dbt.type_int() ~ \" )\n ) * (-1)\",\n \"cast( \" ~ dbt.date_trunc(date_part, date_col) ~ \" as \" ~ dbt_expectations.type_datetime() ~ \")\"\n )}} as date_{{ date_part }},\n\n {% endif %}\n\n count(*) as row_cnt\n from\n {{ model }} f\n {% if row_condition %}\n where {{ row_condition }}\n {% endif %}\n group by\n date_{{date_part}}\n\n),\n\nfinal as (\n\n select\n cast(d.date_{{ date_part }} as {{ dbt_expectations.type_datetime() }}) as date_{{ date_part }},\n case when f.date_{{ date_part }} is null then true else false end as is_missing,\n coalesce(f.row_cnt, 0) as row_cnt\n from\n base_dates d\n left join\n model_data f on cast(d.date_{{ date_part }} as {{ dbt_expectations.type_datetime() }}) = f.date_{{ date_part }}\n)\nselect\n *\nfrom final\nwhere row_cnt = 0\n{% if exclusion_condition %}\n and {{ exclusion_condition }}\n{% endif %}\n{%- endtest -%}", + "depends_on": { + "macros": [ + "macro.dbt.run_query", + "macro.dbt_date.get_base_dates", + "macro.dbt.datediff", + "macro.dbt.type_int", + "macro.dbt.date_trunc", + "macro.dbt_expectations.type_datetime", + "macro.dbt.dateadd" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.681776, + "supported_languages": null + }, + "macro.dbt_expectations._get_metric_expression": { + "name": "_get_metric_expression", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/distributional/expect_column_values_to_be_within_n_moving_stdevs.sql", + "original_file_path": "macros/schema_tests/distributional/expect_column_values_to_be_within_n_moving_stdevs.sql", + "unique_id": "macro.dbt_expectations._get_metric_expression", + "macro_sql": "{%- macro _get_metric_expression(metric_column, take_logs) -%}\n\n{%- if take_logs %}\n{%- set expr = \"nullif(\" ~ metric_column ~ \", 0)\" -%}\ncoalesce({{ dbt_expectations.log_natural(expr) }}, 0)\n{%- else -%}\ncoalesce({{ metric_column }}, 0)\n{%- endif %}\n\n{%- endmacro -%}\n\n", + "depends_on": { "macros": ["macro.dbt_expectations.log_natural"] }, + "description": "", + "meta": {}, + "docs": { "show": true, 
"node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.686559, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_be_within_n_moving_stdevs": { + "name": "test_expect_column_values_to_be_within_n_moving_stdevs", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/distributional/expect_column_values_to_be_within_n_moving_stdevs.sql", + "original_file_path": "macros/schema_tests/distributional/expect_column_values_to_be_within_n_moving_stdevs.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_be_within_n_moving_stdevs", + "macro_sql": "{% test expect_column_values_to_be_within_n_moving_stdevs(model,\n column_name,\n date_column_name,\n group_by=None,\n period='day',\n lookback_periods=1,\n trend_periods=7,\n test_periods=14,\n sigma_threshold=3,\n sigma_threshold_upper=None,\n sigma_threshold_lower=None,\n take_diffs=true,\n take_logs=true\n ) -%}\n {{ adapter.dispatch('test_expect_column_values_to_be_within_n_moving_stdevs', 'dbt_expectations') (model,\n column_name,\n date_column_name,\n group_by,\n period,\n lookback_periods,\n trend_periods,\n test_periods,\n sigma_threshold,\n sigma_threshold_upper,\n sigma_threshold_lower,\n take_diffs,\n take_logs\n ) }}\n{%- endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.default__test_expect_column_values_to_be_within_n_moving_stdevs" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.68714, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_column_values_to_be_within_n_moving_stdevs": { + "name": "default__test_expect_column_values_to_be_within_n_moving_stdevs", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/distributional/expect_column_values_to_be_within_n_moving_stdevs.sql", + "original_file_path": "macros/schema_tests/distributional/expect_column_values_to_be_within_n_moving_stdevs.sql", + "unique_id": "macro.dbt_expectations.default__test_expect_column_values_to_be_within_n_moving_stdevs", + "macro_sql": "{% macro default__test_expect_column_values_to_be_within_n_moving_stdevs(model,\n column_name,\n date_column_name,\n group_by,\n period,\n lookback_periods,\n trend_periods,\n test_periods,\n sigma_threshold,\n sigma_threshold_upper,\n sigma_threshold_lower,\n take_diffs,\n take_logs\n ) %}\n\n{%- set sigma_threshold_upper = sigma_threshold_upper if sigma_threshold_upper else sigma_threshold -%}\n{%- set sigma_threshold_lower = sigma_threshold_lower if sigma_threshold_lower else -1 * sigma_threshold -%}\n{%- set partition_by = \"partition by \" ~ (group_by | join(\",\")) if group_by -%}\n{%- set group_by_length = (group_by | length ) if group_by else 0 -%}\n\nwith metric_values as (\n\n with grouped_metric_values as (\n\n select\n {{ dbt.date_trunc(period, date_column_name) }} as metric_period,\n {{ group_by | join(\",\") ~ \",\" if group_by }}\n sum({{ column_name }}) as agg_metric_value\n from\n {{ model }}\n {{ dbt_expectations.group_by(1 + group_by_length) }}\n\n )\n {%- if take_diffs %}\n , grouped_metric_values_with_priors as (\n\n select\n *,\n lag(agg_metric_value, {{ lookback_periods }}) over(\n {{ partition_by }}\n order by metric_period) as prior_agg_metric_value\n from\n grouped_metric_values d\n\n )\n select\n *,\n {{ dbt_expectations._get_metric_expression(\"agg_metric_value\", 
take_logs) }}\n -\n {{ dbt_expectations._get_metric_expression(\"prior_agg_metric_value\", take_logs) }}\n as metric_test_value\n from\n grouped_metric_values_with_priors d\n\n {%- else %}\n\n select\n *,\n {{ dbt_expectations._get_metric_expression(\"agg_metric_value\", take_logs) }}\n as metric_test_value\n from\n grouped_metric_values\n\n {%- endif %}\n\n),\nmetric_moving_calcs as (\n\n select\n *,\n avg(metric_test_value)\n over({{ partition_by }}\n order by metric_period rows\n between {{ trend_periods }} preceding and 1 preceding) as metric_test_rolling_average,\n stddev(metric_test_value)\n over({{ partition_by }}\n order by metric_period rows\n between {{ trend_periods }} preceding and 1 preceding) as metric_test_rolling_stddev\n from\n metric_values\n\n),\nmetric_sigma as (\n\n select\n *,\n (metric_test_value - metric_test_rolling_average) as metric_test_delta,\n (metric_test_value - metric_test_rolling_average)/\n nullif(metric_test_rolling_stddev, 0) as metric_test_sigma\n from\n metric_moving_calcs\n\n)\nselect\n *\nfrom\n metric_sigma\nwhere\n\n metric_period >= cast(\n {{ dbt.dateadd(period, -test_periods, dbt.date_trunc(period, dbt_date.now())) }}\n as {{ dbt_expectations.type_timestamp() }})\n and\n metric_period < {{ dbt.date_trunc(period, dbt_date.now()) }}\n and\n\n not (\n metric_test_sigma >= {{ sigma_threshold_lower }} and\n metric_test_sigma <= {{ sigma_threshold_upper }}\n )\n{%- endmacro -%}", + "depends_on": { + "macros": [ + "macro.dbt.date_trunc", + "macro.dbt_expectations.group_by", + "macro.dbt_expectations._get_metric_expression", + "macro.dbt.dateadd", + "macro.dbt_date.now", + "macro.dbt_expectations.type_timestamp" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.68873, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_values_to_be_within_n_stdevs": { + "name": "test_expect_column_values_to_be_within_n_stdevs", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/distributional/expect_column_values_to_be_within_n_stdevs.sql", + "original_file_path": "macros/schema_tests/distributional/expect_column_values_to_be_within_n_stdevs.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_values_to_be_within_n_stdevs", + "macro_sql": "{% test expect_column_values_to_be_within_n_stdevs(model,\n column_name,\n group_by=None,\n sigma_threshold=3\n ) -%}\n {{\n adapter.dispatch('test_expect_column_values_to_be_within_n_stdevs', 'dbt_expectations') (\n model, column_name, group_by, sigma_threshold\n )\n }}\n{%- endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.default__test_expect_column_values_to_be_within_n_stdevs" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.689871, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_column_values_to_be_within_n_stdevs": { + "name": "default__test_expect_column_values_to_be_within_n_stdevs", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/distributional/expect_column_values_to_be_within_n_stdevs.sql", + "original_file_path": "macros/schema_tests/distributional/expect_column_values_to_be_within_n_stdevs.sql", + "unique_id": "macro.dbt_expectations.default__test_expect_column_values_to_be_within_n_stdevs", + "macro_sql": "{% macro 
default__test_expect_column_values_to_be_within_n_stdevs(model,\n column_name,\n group_by,\n sigma_threshold\n ) %}\n\nwith metric_values as (\n\n select\n {{ group_by | join(\",\") ~ \",\" if group_by }}\n sum({{ column_name }}) as {{ column_name }}\n from\n {{ model }}\n {% if group_by -%}\n {{ dbt_expectations.group_by(group_by | length) }}\n {%- endif %}\n\n),\nmetric_values_with_statistics as (\n\n select\n *,\n avg({{ column_name }}) over() as {{ column_name }}_average,\n stddev({{ column_name }}) over() as {{ column_name }}_stddev\n from\n metric_values\n\n),\nmetric_values_z_scores as (\n\n select\n *,\n ({{ column_name }} - {{ column_name }}_average)/\n nullif({{ column_name }}_stddev, 0) as {{ column_name }}_sigma\n from\n metric_values_with_statistics\n\n)\nselect\n *\nfrom\n metric_values_z_scores\nwhere\n abs({{ column_name }}_sigma) > {{ sigma_threshold }}\n{%- endmacro %}", + "depends_on": { "macros": ["macro.dbt_expectations.group_by"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.690495, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_select_column_values_to_be_unique_within_record": { + "name": "test_expect_select_column_values_to_be_unique_within_record", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/multi-column/expect_select_column_values_to_be_unique_within_record.sql", + "original_file_path": "macros/schema_tests/multi-column/expect_select_column_values_to_be_unique_within_record.sql", + "unique_id": "macro.dbt_expectations.test_expect_select_column_values_to_be_unique_within_record", + "macro_sql": "{% test expect_select_column_values_to_be_unique_within_record(model,\n column_list,\n quote_columns=False,\n ignore_row_if=\"all_values_are_missing\",\n row_condition=None\n ) -%}\n {{ adapter.dispatch('test_expect_select_column_values_to_be_unique_within_record', 'dbt_expectations') (model, column_list, quote_columns, ignore_row_if, row_condition) }}\n{%- endtest %}", + "depends_on": { + "macros": [ + "macro.dbt_expectations.default__test_expect_select_column_values_to_be_unique_within_record" + ] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.692097, + "supported_languages": null + }, + "macro.dbt_expectations.default__test_expect_select_column_values_to_be_unique_within_record": { + "name": "default__test_expect_select_column_values_to_be_unique_within_record", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/multi-column/expect_select_column_values_to_be_unique_within_record.sql", + "original_file_path": "macros/schema_tests/multi-column/expect_select_column_values_to_be_unique_within_record.sql", + "unique_id": "macro.dbt_expectations.default__test_expect_select_column_values_to_be_unique_within_record", + "macro_sql": "{% macro default__test_expect_select_column_values_to_be_unique_within_record(model,\n column_list,\n quote_columns,\n ignore_row_if,\n row_condition\n ) %}\n\n{% if not quote_columns %}\n {%- set columns=column_list %}\n{% elif quote_columns %}\n {%- set columns=[] %}\n {% for column in column_list -%}\n {% set columns = columns.append( adapter.quote(column) ) %}\n {%- endfor %}\n{% else %}\n {{ exceptions.raise_compiler_error(\n \"`quote_columns` argument for unique_combination_of_columns test must be one of 
[True, False] Got: '\" ~ quote_columns ~\"'.'\"\n ) }}\n{% endif %}\n\n{%- set row_condition_ext -%}\n\n {%- if row_condition %}\n {{ row_condition }} and\n {% endif -%}\n\n {{ dbt_expectations.ignore_row_if_expression(ignore_row_if, columns) }}\n\n{%- endset -%}\n\nwith column_values as (\n\n select\n row_number() over(order by 1) as row_index,\n {% for column in columns -%}\n {{ column }}{% if not loop.last %},{% endif %}\n {%- endfor %}\n from {{ model }}\n where\n 1=1\n {%- if row_condition_ext %}\n and {{ row_condition_ext }}\n {% endif %}\n\n),\nunpivot_columns as (\n\n {% for column in columns %}\n select row_index, '{{ column }}' as column_name, {{ column }} as column_value from column_values\n {% if not loop.last %}union all{% endif %}\n {% endfor %}\n),\nvalidation_errors as (\n\n select\n row_index,\n count(distinct column_value) as column_values\n from unpivot_columns\n group by 1\n having count(distinct column_value) < {{ columns | length }}\n\n)\nselect * from validation_errors\n{% endmacro %}", + "depends_on": { + "macros": ["macro.dbt_expectations.ignore_row_if_expression"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.693275, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_compound_columns_to_be_unique": { + "name": "test_expect_compound_columns_to_be_unique", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/multi-column/expect_compound_columns_to_be_unique.sql", + "original_file_path": "macros/schema_tests/multi-column/expect_compound_columns_to_be_unique.sql", + "unique_id": "macro.dbt_expectations.test_expect_compound_columns_to_be_unique", + "macro_sql": "{% test expect_compound_columns_to_be_unique(model,\n column_list,\n quote_columns=False,\n ignore_row_if=\"all_values_are_missing\",\n row_condition=None\n ) %}\n{% if not column_list %}\n {{ exceptions.raise_compiler_error(\n \"`column_list` must be specified as a list of columns. 
Got: '\" ~ column_list ~\"'.'\"\n ) }}\n{% endif %}\n\n{% if not quote_columns %}\n {%- set columns=column_list %}\n{% elif quote_columns %}\n {%- set columns=[] %}\n {% for column in column_list -%}\n {% set columns = columns.append( adapter.quote(column) ) %}\n {%- endfor %}\n{% else %}\n {{ exceptions.raise_compiler_error(\n \"`quote_columns` argument for expect_compound_columns_to_be_unique test must be one of [True, False] Got: '\" ~ quote_columns ~\"'.'\"\n ) }}\n{% endif %}\n\n{%- set row_condition_ext -%}\n\n {%- if row_condition %}\n {{ row_condition }} and\n {% endif -%}\n\n {{ dbt_expectations.ignore_row_if_expression(ignore_row_if, columns) }}\n\n{%- endset -%}\n\nwith validation_errors as (\n\n select\n {% for column in columns -%}\n {{ column }}{% if not loop.last %},{% endif %}\n {%- endfor %}\n from {{ model }}\n where\n 1=1\n {%- if row_condition_ext %}\n and {{ row_condition_ext }}\n {% endif %}\n group by\n {% for column in columns -%}\n {{ column }}{% if not loop.last %},{% endif %}\n {%- endfor %}\n having count(*) > 1\n\n)\nselect * from validation_errors\n{% endtest %}", + "depends_on": { + "macros": ["macro.dbt_expectations.ignore_row_if_expression"] + }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6953778, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_multicolumn_sum_to_equal": { + "name": "test_expect_multicolumn_sum_to_equal", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/multi-column/expect_multicolumn_sum_to_equal.sql", + "original_file_path": "macros/schema_tests/multi-column/expect_multicolumn_sum_to_equal.sql", + "unique_id": "macro.dbt_expectations.test_expect_multicolumn_sum_to_equal", + "macro_sql": "{% test expect_multicolumn_sum_to_equal(model,\n column_list,\n sum_total,\n group_by=None,\n row_condition=None\n ) %}\n\n{% set expression %}\n{% for column in column_list %}\nsum({{ column }}){% if not loop.last %} + {% endif %}\n{% endfor %} = {{ sum_total }}\n{% endset %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=group_by,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6964512, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_pair_values_to_be_equal": { + "name": "test_expect_column_pair_values_to_be_equal", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/multi-column/expect_column_pair_values_to_be_equal.sql", + "original_file_path": "macros/schema_tests/multi-column/expect_column_pair_values_to_be_equal.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_pair_values_to_be_equal", + "macro_sql": "{% test expect_column_pair_values_to_be_equal(model,\n column_A,\n column_B,\n row_condition=None\n ) %}\n\n{% set operator = \"=\" %}\n{% set expression = column_A ~ \" \" ~ operator ~ \" \" ~ column_B %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, 
"node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6972082, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_pair_values_A_to_be_greater_than_B": { + "name": "test_expect_column_pair_values_A_to_be_greater_than_B", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/multi-column/expect_column_pair_values_A_to_be_greater_than_B.sql", + "original_file_path": "macros/schema_tests/multi-column/expect_column_pair_values_A_to_be_greater_than_B.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_pair_values_A_to_be_greater_than_B", + "macro_sql": "{% test expect_column_pair_values_A_to_be_greater_than_B(model,\n column_A,\n column_B,\n or_equal=False,\n row_condition=None\n ) %}\n\n{% set operator = \">=\" if or_equal else \">\" %}\n{% set expression = column_A ~ \" \" ~ operator ~ \" \" ~ column_B %}\n\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.6980789, + "supported_languages": null + }, + "macro.dbt_expectations.test_expect_column_pair_values_to_be_in_set": { + "name": "test_expect_column_pair_values_to_be_in_set", + "resource_type": "macro", + "package_name": "dbt_expectations", + "path": "macros/schema_tests/multi-column/expect_column_pair_values_to_be_in_set.sql", + "original_file_path": "macros/schema_tests/multi-column/expect_column_pair_values_to_be_in_set.sql", + "unique_id": "macro.dbt_expectations.test_expect_column_pair_values_to_be_in_set", + "macro_sql": "{% test expect_column_pair_values_to_be_in_set(model,\n column_A,\n column_B,\n value_pairs_set,\n row_condition=None\n ) %}\n\n{% set expression %}\n{% for pair in value_pairs_set %}\n{%- if (pair | length) == 2 %}\n({{ column_A }} = {{ pair[0] }} and {{ column_B }} = {{ pair[1] }}){% if not loop.last %} or {% endif %}\n{% else %}\n{{ exceptions.raise_compiler_error(\n \"`value_pairs_set` argument for expect_column_pair_values_to_be_in_set test cannot have more than 2 item per element.\n Got: '\" ~ pair ~ \"'.'\"\n ) }}\n{% endif %}\n{% endfor %}\n{% endset %}\n{{ dbt_expectations.expression_is_true(model,\n expression=expression,\n group_by_columns=None,\n row_condition=row_condition\n )\n }}\n\n{% endtest %}", + "depends_on": { "macros": ["macro.dbt_expectations.expression_is_true"] }, + "description": "", + "meta": {}, + "docs": { "show": true, "node_color": null }, + "patch_path": null, + "arguments": [], + "created_at": 1705588676.69968, + "supported_languages": null + } + }, + "docs": { + "doc.balboa.__overview__": { + "name": "__overview__", + "resource_type": "doc", + "package_name": "balboa", + "path": "overview.md", + "original_file_path": "models/overview.md", + "unique_id": "doc.balboa.__overview__", + "block_contents": "# Datacoves Demo\nThis project is used as a demonstration of an end-to-end data flow utilizing DataOps best practices and automate processes that:\n* Transform data to make it analytics read\n* Enforce governance rules\n* Capture data lineage\n* Capture documentation\n* Perform data testing\n\n![](https://www.dataops.live/hubfs/DataOps-Infinty-Blue-1.png)\n\n## Need Help\nContact: Noel Gomez at gomezn@datacoves.com" + }, + 
"doc.balboa.__Analysis__": { + "name": "__Analysis__", + "resource_type": "doc", + "package_name": "balboa", + "path": "overview.md", + "original_file_path": "models/overview.md", + "unique_id": "doc.balboa.__Analysis__", + "block_contents": "# BI Dashboards\nHere you can find information about tools outside of dbt which make use of Snowflake data\n\nOur BI server can be accessed here" + }, + "doc.balboa.__balboa__": { + "name": "__balboa__", + "resource_type": "doc", + "package_name": "balboa", + "path": "overview.md", + "original_file_path": "models/overview.md", + "unique_id": "doc.balboa.__balboa__", + "block_contents": "# Main Project\nThis area describes the models and transformations used in our data warehouse.\n\nMore information can be found on our home page\n\nUseful dbt packages can by found on dbt hub\n\nUseful dbt libraries can by found on the Datacoves dbt libraries page." + }, + "doc.balboa.__dbt_expectations__": { + "name": "__dbt_expectations__", + "resource_type": "doc", + "package_name": "balboa", + "path": "overview.md", + "original_file_path": "models/overview.md", + "unique_id": "doc.balboa.__dbt_expectations__", + "block_contents": "# Test macros provided by dbt expecations\nWe use this package to add more advanced tests to our models.\nFor more information, visit \nthe dbt-expecations site." + }, + "doc.balboa.__dbt_date__": { + "name": "__dbt_date__", + "resource_type": "doc", + "package_name": "balboa", + "path": "overview.md", + "original_file_path": "models/overview.md", + "unique_id": "doc.balboa.__dbt_date__", + "block_contents": "# Utility macros used by dbt_utils\nWe use this suite of utility macros in our transformations." + }, + "doc.dbt.__overview__": { + "name": "__overview__", + "resource_type": "doc", + "package_name": "dbt", + "path": "overview.md", + "original_file_path": "docs/overview.md", + "unique_id": "doc.dbt.__overview__", + "block_contents": "### Welcome!\n\nWelcome to the auto-generated documentation for your dbt project!\n\n### Navigation\n\nYou can use the `Project` and `Database` navigation tabs on the left side of the window to explore the models\nin your project.\n\n#### Project Tab\nThe `Project` tab mirrors the directory structure of your dbt project. In this tab, you can see all of the\nmodels defined in your dbt project, as well as models imported from dbt packages.\n\n#### Database Tab\nThe `Database` tab also exposes your models, but in a format that looks more like a database explorer. This view\nshows relations (tables and views) grouped into database schemas. Note that ephemeral models are _not_ shown\nin this interface, as they do not exist in the database.\n\n### Graph Exploration\nYou can click the blue icon on the bottom-right corner of the page to view the lineage graph of your models.\n\nOn model pages, you'll see the immediate parents and children of the model you're exploring. By clicking the `Expand`\nbutton at the top-right of this lineage pane, you'll be able to see all of the models that are used to build,\nor are built from, the model you're exploring.\n\nOnce expanded, you'll be able to use the `--select` and `--exclude` model selection syntax to filter the\nmodels in the graph. 
For more information on model selection, check out the [dbt docs](https://docs.getdbt.com/docs/model-selection-syntax).\n\nNote that you can also right-click on models to interactively filter and explore the graph.\n\n---\n\n### More information\n\n- [What is dbt](https://docs.getdbt.com/docs/introduction)?\n- Read the [dbt viewpoint](https://docs.getdbt.com/docs/viewpoint)\n- [Installation](https://docs.getdbt.com/docs/installation)\n- Join the [dbt Community](https://www.getdbt.com/community/) for questions and discussion" + } + }, + "exposures": { + "exposure.balboa.customer_loans": { + "name": "customer_loans", + "resource_type": "exposure", + "package_name": "balboa", + "path": "L4_exposures/customer_loans.yml", + "original_file_path": "models/L4_exposures/customer_loans.yml", + "unique_id": "exposure.balboa.customer_loans", + "fqn": ["balboa", "L4_exposures", "customer_loans"], + "type": "dashboard", + "owner": { "email": "segan@datameer.com", "name": "Steve Egan" }, + "description": "Loan Performance\n", + "label": null, + "maturity": "high", + "meta": {}, + "tags": [], + "config": { "enabled": true }, + "unrendered_config": {}, + "url": "https://superset-dev123.east-us-a.datacoves.com:443/r/1", + "depends_on": { + "macros": [], + "nodes": [ + "model.balboa.personal_loans", + "model.balboa.current_population" + ] + }, + "refs": [ + { "name": "personal_loans", "package": null, "version": null }, + { "name": "current_population", "package": null, "version": null } + ], + "sources": [], + "metrics": [], + "created_at": 1705588677.569696 + }, + "exposure.balboa.loans_analysis": { + "name": "loans_analysis", + "resource_type": "exposure", + "package_name": "balboa", + "path": "L4_exposures/loan_analytics.yml", + "original_file_path": "models/L4_exposures/loan_analytics.yml", + "unique_id": "exposure.balboa.loans_analysis", + "fqn": ["balboa", "L4_exposures", "loans_analysis"], + "type": "dashboard", + "owner": { "email": "gomezn@datacoves.com", "name": "Noel Gomez" }, + "description": "Top 10 states with loans\n", + "label": null, + "maturity": "medium", + "meta": {}, + "tags": [], + "config": { "enabled": true }, + "unrendered_config": {}, + "url": "https://ddyxorozsz-gay725.east-us-a.datacoves.com/", + "depends_on": { + "macros": [], + "nodes": ["source.balboa.LOANS.PERSONAL_LOANS"] + }, + "refs": [], + "sources": [["LOANS", "PERSONAL_LOANS"]], + "metrics": [], + "created_at": 1705588677.572059 + } + }, + "metrics": {}, + "groups": {}, + "selectors": {}, + "disabled": {}, + "parent_map": { + "model.balboa.credits_total": [ + "model.balboa.int_warehouse_metering_history" + ], + "model.balboa.storage_usage_m": ["model.balboa.int_storage_usage"], + "model.balboa.credits_by_month": [ + "model.balboa.int_warehouse_metering_history" + ], + "model.balboa.credits_mtd": ["model.balboa.int_warehouse_metering_history"], + "model.balboa.credits_by_warehouse": [ + "model.balboa.int_warehouse_metering_history" + ], + "model.balboa.credits_variance": [ + "model.balboa.int_warehouse_metering_history" + ], + "model.balboa.query_utilization": ["model.balboa.int_query_history"], + "model.balboa.int_pipe_usage_history": ["model.balboa.pipe_usage_history"], + "model.balboa.int_storage_usage": ["model.balboa.storage_usage"], + "model.balboa.int_warehouse_metering_history": [ + "model.balboa.warehouse_metering_history" + ], + "model.balboa.int_query_history": ["model.balboa.query_history"], + "model.balboa.current_population": ["model.balboa.country_populations"], + "model.balboa.covid_location": 
["model.balboa.jhu_covid_19"], + "model.balboa.base_cases": ["model.balboa.jhu_covid_19"], + "model.balboa.total_covid_cases": ["model.balboa.base_cases"], + "model.balboa.personal_loans": ["source.balboa.LOANS.PERSONAL_LOANS"], + "model.balboa.jhu_covid_19": [ + "source.balboa.covid19_epidemiological_data.jhu_covid_19" + ], + "model.balboa.query_history": ["source.balboa.ACCOUNT_USAGE.QUERY_HISTORY"], + "model.balboa.storage_usage": ["source.balboa.ACCOUNT_USAGE.STORAGE_USAGE"], + "model.balboa.warehouse_metering_history": [ + "source.balboa.ACCOUNT_USAGE.WAREHOUSE_METERING_HISTORY" + ], + "model.balboa.pipe_usage_history": [ + "source.balboa.ACCOUNT_USAGE.PIPE_USAGE_HISTORY" + ], + "model.balboa.country_populations": [ + "source.balboa.RAW.COUNTRY_POPULATIONS" + ], + "model.balboa.engagement_events_report": [ + "source.balboa.GOOGLE_ANALYTICS_4.ENGAGEMENT_EVENTS_REPORT" + ], + "model.balboa.loans_by_state": ["source.balboa.LOANS.PERSONAL_LOANS"], + "model.balboa.covid_cases_country": [ + "model.balboa.covid_location", + "model.balboa.total_covid_cases" + ], + "model.balboa.covid_cases_state": [ + "model.balboa.covid_location", + "model.balboa.total_covid_cases" + ], + "snapshot.balboa.snp_jhu_dashboard_covid_19_global": [ + "source.balboa.covid19_epidemiological_data.jhu_dashboard_covid_19_global" + ], + "seed.balboa.state_codes": [], + "seed.balboa.covid_cases_expected_values": [], + "test.balboa.not_null_current_population_country_code.fc48086c4b": [ + "model.balboa.current_population" + ], + "test.balboa.unique_current_population_country_code.010d1ff45e": [ + "model.balboa.current_population" + ], + "test.balboa.not_null_base_cases_cases.a7292b3eca": [ + "model.balboa.base_cases" + ], + "test.balboa.not_null_covid_location_state.de61c768b2": [ + "model.balboa.covid_location" + ], + "test.balboa.not_null_covid_location_country.2d0d8f32fe": [ + "model.balboa.covid_location" + ], + "test.balboa.dbt_utils_unique_combination_of_columns_country_populations_country_code__year.f0f4e51143": [ + "model.balboa.country_populations" + ], + "source.balboa.LOANS.PERSONAL_LOANS": [], + "source.balboa.covid19_epidemiological_data.jhu_covid_19": [], + "source.balboa.covid19_epidemiological_data.jhu_dashboard_covid_19_global": [], + "source.balboa.lineage.lineage_processing": [], + "source.balboa.ACCOUNT_USAGE.PIPE_USAGE_HISTORY": [], + "source.balboa.ACCOUNT_USAGE.QUERY_HISTORY": [], + "source.balboa.ACCOUNT_USAGE.STORAGE_USAGE": [], + "source.balboa.ACCOUNT_USAGE.WAREHOUSE_METERING_HISTORY": [], + "source.balboa.RAW.COUNTRY_POPULATIONS": [], + "source.balboa.GOOGLE_ANALYTICS_4.ENGAGEMENT_EVENTS_REPORT": [], + "exposure.balboa.customer_loans": [ + "model.balboa.current_population", + "model.balboa.personal_loans" + ], + "exposure.balboa.loans_analysis": ["source.balboa.LOANS.PERSONAL_LOANS"] + }, + "child_map": { + "model.balboa.credits_total": [], + "model.balboa.storage_usage_m": [], + "model.balboa.credits_by_month": [], + "model.balboa.credits_mtd": [], + "model.balboa.credits_by_warehouse": [], + "model.balboa.credits_variance": [], + "model.balboa.query_utilization": [], + "model.balboa.int_pipe_usage_history": [], + "model.balboa.int_storage_usage": ["model.balboa.storage_usage_m"], + "model.balboa.int_warehouse_metering_history": [ + "model.balboa.credits_by_month", + "model.balboa.credits_by_warehouse", + "model.balboa.credits_mtd", + "model.balboa.credits_total", + "model.balboa.credits_variance" + ], + "model.balboa.int_query_history": ["model.balboa.query_utilization"], + 
"model.balboa.current_population": [ + "exposure.balboa.customer_loans", + "test.balboa.not_null_current_population_country_code.fc48086c4b", + "test.balboa.unique_current_population_country_code.010d1ff45e" + ], + "model.balboa.covid_location": [ + "model.balboa.covid_cases_country", + "model.balboa.covid_cases_state", + "test.balboa.not_null_covid_location_country.2d0d8f32fe", + "test.balboa.not_null_covid_location_state.de61c768b2" + ], + "model.balboa.base_cases": [ + "model.balboa.total_covid_cases", + "test.balboa.not_null_base_cases_cases.a7292b3eca" + ], + "model.balboa.total_covid_cases": [ + "model.balboa.covid_cases_country", + "model.balboa.covid_cases_state" + ], + "model.balboa.personal_loans": ["exposure.balboa.customer_loans"], + "model.balboa.jhu_covid_19": [ + "model.balboa.base_cases", + "model.balboa.covid_location" + ], + "model.balboa.query_history": ["model.balboa.int_query_history"], + "model.balboa.storage_usage": ["model.balboa.int_storage_usage"], + "model.balboa.warehouse_metering_history": [ + "model.balboa.int_warehouse_metering_history" + ], + "model.balboa.pipe_usage_history": ["model.balboa.int_pipe_usage_history"], + "model.balboa.country_populations": [ + "model.balboa.current_population", + "test.balboa.dbt_utils_unique_combination_of_columns_country_populations_country_code__year.f0f4e51143" + ], + "model.balboa.engagement_events_report": [], + "model.balboa.loans_by_state": [], + "model.balboa.covid_cases_country": [], + "model.balboa.covid_cases_state": [], + "snapshot.balboa.snp_jhu_dashboard_covid_19_global": [], + "seed.balboa.state_codes": [], + "seed.balboa.covid_cases_expected_values": [], + "test.balboa.not_null_current_population_country_code.fc48086c4b": [], + "test.balboa.unique_current_population_country_code.010d1ff45e": [], + "test.balboa.not_null_base_cases_cases.a7292b3eca": [], + "test.balboa.not_null_covid_location_state.de61c768b2": [], + "test.balboa.not_null_covid_location_country.2d0d8f32fe": [], + "test.balboa.dbt_utils_unique_combination_of_columns_country_populations_country_code__year.f0f4e51143": [], + "source.balboa.LOANS.PERSONAL_LOANS": [ + "exposure.balboa.loans_analysis", + "model.balboa.loans_by_state", + "model.balboa.personal_loans" + ], + "source.balboa.covid19_epidemiological_data.jhu_covid_19": [ + "model.balboa.jhu_covid_19" + ], + "source.balboa.covid19_epidemiological_data.jhu_dashboard_covid_19_global": [ + "snapshot.balboa.snp_jhu_dashboard_covid_19_global" + ], + "source.balboa.lineage.lineage_processing": [], + "source.balboa.ACCOUNT_USAGE.PIPE_USAGE_HISTORY": [ + "model.balboa.pipe_usage_history" + ], + "source.balboa.ACCOUNT_USAGE.QUERY_HISTORY": ["model.balboa.query_history"], + "source.balboa.ACCOUNT_USAGE.STORAGE_USAGE": ["model.balboa.storage_usage"], + "source.balboa.ACCOUNT_USAGE.WAREHOUSE_METERING_HISTORY": [ + "model.balboa.warehouse_metering_history" + ], + "source.balboa.RAW.COUNTRY_POPULATIONS": [ + "model.balboa.country_populations" + ], + "source.balboa.GOOGLE_ANALYTICS_4.ENGAGEMENT_EVENTS_REPORT": [ + "model.balboa.engagement_events_report" + ], + "exposure.balboa.customer_loans": [], + "exposure.balboa.loans_analysis": [] + }, + "group_map": {}, + "semantic_models": {} +} diff --git a/src/core/dbt-api/test/support/helpers.ex b/src/core/dbt-api/test/support/helpers.ex new file mode 100644 index 00000000..a73626f8 --- /dev/null +++ b/src/core/dbt-api/test/support/helpers.ex @@ -0,0 +1,56 @@ +defmodule Support.Helpers do + import Support.Factory + + def insert_two_accounts_with_repos() do + 
account_1 = insert(:account) + + # Create an Environment/Database for the main account + project_1 = insert(:project, account: account_1) + + environment_1 = + insert(:environment, + project: project_1, + services: %{"airflow" => %{"enabled" => true}}, + slug: "airflow1" + ) + + # Create an Environment/Database for the another account + account_2 = insert(:account) + project_2 = insert(:project, account: account_2) + + environment_2 = + insert(:environment, + project: project_2, + services: %{"airflow" => %{"enabled" => true}}, + slug: "airflow2" + ) + + pid = ExUnit.Callbacks.start_supervised!(Airflow.Repos) + children = Supervisor.which_children(pid) + + repo_1 = + Enum.find_value(children, fn {id, pid, :worker, _repo} -> id == "airflow1" && pid end) + + repo_2 = + Enum.find_value(children, fn {id, pid, :worker, _repo} -> id == "airflow2" && pid end) + + %{ + account_1: account_1, + account_2: account_2, + project_1: project_1, + project_2: project_2, + environment_1: environment_1, + environment_2: environment_2, + repo_1: repo_1, + repo_2: repo_2 + } + end + + def insert_auth_token_for_user(user, account, environment, project) do + extended_group = + insert(:extended_group, account: account, environment: environment, project: project) + + insert(:group, users: [user], extended_group: extended_group) + insert(:auth_token, user: user) + end +end diff --git a/src/core/dbt-api/test/support/mocks/httpoison_mock.ex b/src/core/dbt-api/test/support/mocks/httpoison_mock.ex new file mode 100644 index 00000000..4c772166 --- /dev/null +++ b/src/core/dbt-api/test/support/mocks/httpoison_mock.ex @@ -0,0 +1,103 @@ +defmodule Support.Mocks.HTTPoisonMock do + alias Datacoves.AuthTokens.AuthTokenRepo + + @file_manifest File.read!("./test/support/fixtures/manifest.json") + |> Jason.decode!() + |> Jason.encode!() + + # Airflow.Repos Tests + + def get("enabled-wo-pods-airflow-postgresql.dcw-enabled-wo-pods.svc.cluster.local:5432") do + {:error, %HTTPoison.Error{reason: :nxdomain}} + end + + def get("enabled-airflow-postgresql.dcw-enabled.svc.cluster.local:5432") do + {:error, %HTTPoison.Error{reason: :closed}} + end + + def get("env-wo-airflow-airflow-postgresql.dcw-env-wo-airflow.svc.cluster.local:5432") do + {:error, %HTTPoison.Error{reason: :not_found}} + end + + # JobController Test + + def get("airflow1-airflow-postgresql.dcw-airflow1.svc.cluster.local:5432") do + {:error, %HTTPoison.Error{reason: :closed}} + end + + def get("airflow2-airflow-postgresql.dcw-airflow2.svc.cluster.local:5432") do + {:error, %HTTPoison.Error{reason: :closed}} + end + + # Storage Mock for ManifestRepo and Internal.{FileController, ManifestController} Tests + + def get("http://localhost:9000/jade-dev//fail-upload" <> _path) do + {:ok, %HTTPoison.Response{status_code: 400, body: "bad request"}} + end + + def get("http://localhost:9000/jade-dev" <> _path) do + {:ok, %HTTPoison.Response{status_code: 200, body: @file_manifest}} + end + + def put("http://localhost:9000/jade-dev" <> _path, "fail-upload") do + {:ok, %HTTPoison.Response{status_code: 400, body: "bad request"}} + end + + def put("http://localhost:9000/jade-dev" <> _path, _content) do + {:ok, %HTTPoison.Response{status_code: 200, body: nil}} + end + + # AuthenticateApiKey Tests + + def get( + "https://api.datacoveslocal.com/api/datacoves/verify", + [{"Authorization", "Token " <> token} | _] = _headers + ) do + handle_bearer_token(token) + end + + defp handle_bearer_token("invalid-token") do + {:ok, %HTTPoison.Response{status_code: 401, body: "Invalid token"}} + end + 
+ defp handle_bearer_token("network-error") do + {:error, %HTTPoison.Error{reason: "closed"}} + end + + defp handle_bearer_token(auth_token_key) do + AuthTokenRepo.get_by(key: auth_token_key) |> build_api_key_response() + end + + defp build_api_key_response({:error, :not_found}) do + {:ok, %HTTPoison.Response{status_code: 404, body: "Not found"}} + end + + defp build_api_key_response({:ok, auth_token}) do + permissions = Enum.map(auth_token.user.permissions, & &1.name) + + extended_groups = + auth_token.user.groups + |> Enum.map(& &1.extended_group) + |> List.flatten() + + accounts = Enum.map(extended_groups, & &1.account) |> Enum.reject(&is_nil/1) + environments = Enum.map(extended_groups, & &1.environment) |> Enum.reject(&is_nil/1) + projects = Enum.map(extended_groups, & &1.project) |> Enum.reject(&is_nil/1) + + body = %{ + "permissions" => permissions, + "account_ids" => Enum.map(accounts, & &1.id), + "accounts" => Enum.map(accounts, & &1.slug), + "environment_ids" => Enum.map(environments, & &1.id), + "environments" => Enum.map(environments, & &1.slug), + "project_ids" => Enum.map(projects, & &1.id), + "projects" => Enum.map(projects, & &1.slug) + } + + encoded_body = Jason.encode!(body) + + {:ok, %HTTPoison.Response{status_code: 200, body: encoded_body}} + end + + def delete(_url), do: {:ok, %HTTPoison.Response{status_code: 200}} +end diff --git a/src/core/dbt-api/test/test_helper.exs b/src/core/dbt-api/test/test_helper.exs new file mode 100644 index 00000000..2d82d545 --- /dev/null +++ b/src/core/dbt-api/test/test_helper.exs @@ -0,0 +1,4 @@ +ExUnit.start() +Ecto.Adapters.SQL.Sandbox.mode(Jade.Repo, :manual) +Ecto.Adapters.SQL.Sandbox.mode(Datacoves.Repo, :manual) +Ecto.Adapters.SQL.Sandbox.mode(Airflow.Repo, :manual) diff --git a/src/core/operator/.dockerignore b/src/core/operator/.dockerignore new file mode 100644 index 00000000..0f046820 --- /dev/null +++ b/src/core/operator/.dockerignore @@ -0,0 +1,4 @@ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore build and test binaries. 
+bin/ +testbin/ diff --git a/src/core/operator/.gitignore b/src/core/operator/.gitignore new file mode 100644 index 00000000..c0a7a54c --- /dev/null +++ b/src/core/operator/.gitignore @@ -0,0 +1,25 @@ + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin +testbin/* + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ diff --git a/src/core/operator/Dockerfile b/src/core/operator/Dockerfile new file mode 100644 index 00000000..ef4bea4f --- /dev/null +++ b/src/core/operator/Dockerfile @@ -0,0 +1,43 @@ +# Build the manager binary +FROM golang:1.17 as builder + +RUN curl -LO https://get.helm.sh/helm-v3.11.0-linux-amd64.tar.gz && \ + tar -xzf helm-v3.11.0-linux-amd64.tar.gz && \ + mv linux-amd64/helm /usr/bin/helm + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY main.go main.go +COPY api/ api/ +COPY helm/ helm/ +COPY controllers/ controllers/ + +# Build +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o manager main.go + +# Copy and build fake helm binary +COPY cmd/ cmd/ +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -a -o fake_helm cmd/fake_helm/fake_helm.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +LABEL com.datacoves.from=gcr.io/distroless/static:nonroot +LABEL com.datacoves.version.core-operator='0.3.0' +LABEL com.datacoves.library.core-operator.golang='1.17' +LABEL com.datacoves.library.core-operator.helm='3.11' + +WORKDIR / +COPY --from=builder /workspace/manager . +COPY --from=builder /usr/bin/helm /usr/bin/helm +# COPY --from=builder /workspace/fake_helm /usr/bin/helm +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/src/core/operator/Makefile b/src/core/operator/Makefile new file mode 100644 index 00000000..2e2b3740 --- /dev/null +++ b/src/core/operator/Makefile @@ -0,0 +1,131 @@ + +# Image URL to use all building/pushing image targets +IMG ?= core-operator:0.4.0 +# ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. +ENVTEST_K8S_VERSION = 1.25 + +# Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) +ifeq (,$(shell go env GOBIN)) +GOBIN=$(shell go env GOPATH)/bin +else +GOBIN=$(shell go env GOBIN) +endif + +# Setting SHELL to bash allows bash commands to be executed by recipes. +# This is a requirement for 'setup-envtest.sh' in the test target. +# Options are set to exit when a recipe line exits non-zero or a piped command fails. +SHELL = /usr/bin/env bash -o pipefail +.SHELLFLAGS = -ec + +.PHONY: all +all: build + +##@ General + +# The help target prints out all targets with their descriptions organized +# beneath their categories. The categories are represented by '##@' and the +# target descriptions by '##'. The awk commands is responsible for reading the +# entire set of makefiles included in this invocation, looking for lines of the +# file as xyz: ## something, and then pretty-format the target and help. 
Then, +# if there's a line with ##@ something, that gets pretty-printed as a category. +# More info on the usage of ANSI control characters for terminal formatting: +# https://en.wikipedia.org/wiki/ANSI_escape_code#SGR_parameters +# More info on the awk command: +# http://linuxcommand.org/lc3_adv_awk.php + +.PHONY: help +help: ## Display this help. + @awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-15s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST) + +##@ Development + +.PHONY: manifests +manifests: controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. + $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." output:crd:artifacts:config=config/crd/bases + +.PHONY: generate +generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. + $(CONTROLLER_GEN) object:headerFile="/dev/null" paths="./..." + +.PHONY: fmt +fmt: ## Run go fmt against code. + go fmt ./... + +.PHONY: vet +vet: ## Run go vet against code. + go vet ./... + +.PHONY: test +test: manifests generate fmt vet envtest ## Run tests. + KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) -p path)" go test ./... -coverprofile cover.out + +##@ Build + +.PHONY: build +build: manifests generate fmt vet ## Build manager binary. + go build -o bin/manager main.go + +.PHONY: run +run: manifests generate fmt vet ## Run a controller from your host. + go run ./main.go + +.PHONY: debug +debug: manifests generate fmt vet ## Run a controller from your host with the delve debugger. + dlv debug ./main.go + +# NOTE: Disabling the code for building and pushing docker images, and +# deployment to clusters. All this is done from scripts, see setup_operator. + +# .PHONY: docker-build +# docker-build: test ## Build docker image with the manager. +# docker build -t ${IMG} . + +# .PHONY: docker-push +# docker-push: ## Push docker image with the manager. +# docker push ${IMG} + +# ##@ Deployment + +# ifndef ignore-not-found +# ignore-not-found = false +# endif + +# .PHONY: install +# install: ## Install CRDs into the K8s cluster specified in ~/.kube/config. +# kubectl apply -k config/crd + +# .PHONY: uninstall +# uninstall: ## Uninstall CRDs from the K8s cluster specified in ~/.kube/config. +# kubectl delete -k config/crd + +# .PHONY: deploy +# deploy: install ## Deploy controller to the K8s cluster specified in ~/.kube/config. +# kubectl apply -k config/default + +# .PHONY: undeploy +# undeploy: ## Undeploy controller from the K8s cluster specified in ~/.kube/config. +# kubectl delete -k config/default + +CONTROLLER_GEN = $(shell pwd)/bin/controller-gen +.PHONY: controller-gen +controller-gen: ## Download controller-gen locally if necessary. + $(call go-get-tool,$(CONTROLLER_GEN),sigs.k8s.io/controller-tools/cmd/controller-gen@v0.15.0) + +ENVTEST = $(shell pwd)/bin/setup-envtest +.PHONY: envtest +envtest: ## Download envtest-setup locally if necessary. + $(call go-get-tool,$(ENVTEST),sigs.k8s.io/controller-runtime/tools/setup-envtest@latest) + +# go-get-tool will 'go get' any package $2 and install it to $1. 
+PROJECT_DIR := $(shell dirname $(abspath $(lastword $(MAKEFILE_LIST)))) +define go-get-tool +@[ -f $(1) ] || { \ +set -e ;\ +TMP_DIR=$$(mktemp -d) ;\ +cd $$TMP_DIR ;\ +go mod init tmp ;\ +echo "Downloading $(2)" ;\ +GOBIN=$(PROJECT_DIR)/bin go install $(2) ;\ +rm -rf $$TMP_DIR ;\ +} +endef diff --git a/src/core/operator/PROJECT b/src/core/operator/PROJECT new file mode 100644 index 00000000..fcc120fd --- /dev/null +++ b/src/core/operator/PROJECT @@ -0,0 +1,39 @@ +domain: datacoves.com +layout: +- go.kubebuilder.io/v3 +projectName: operator +repo: datacoves.com/operator +resources: +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: datacoves.com + kind: Workspace + path: datacoves.com/operator/api/v1 + version: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: datacoves.com + kind: User + path: datacoves.com/operator/api/v1 + version: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: datacoves.com + kind: HelmRelease + path: datacoves.com/operator/api/v1 + version: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: datacoves.com + kind: Account + path: datacoves.com/operator/api/v1 + version: v1 +version: "3" diff --git a/src/core/operator/README.md b/src/core/operator/README.md new file mode 100644 index 00000000..46e59442 --- /dev/null +++ b/src/core/operator/README.md @@ -0,0 +1,44 @@ +This directory has the kubernetes operator that manages datacoves.com/Workspace objects. + +The initial directory structure was scaffolded with kubebuilder. + + +### Running locally + +```bash +# Run the operator. It will use your kubectl config to auth with the API, and it +# runs like a normal go program, outside the cluster. +make run ENABLE_WEBHOOKS=false + +# Or, to run with the delve debugger (brew install delve) +make debug ENABLE_WEBHOOKS=false +``` + +### Run locally in cluster + +```bash +# Build the image and load it into kind. +make docker-build IMG=datacovesprivate/operator:0.3.0 && ../cli.py kind_load_version 0.3.0 + +# Deploy +make deploy IMG=datacovesprivate/operator:0.3.0 +alias kco='kubectl -n operator-system' +kco delete pods -l control-plane=controller-manager # delete the pod to reload the image if needed + +# See logs +kco logs -l control-plane=controller-manager -c manager -f +```` + +### Resources + +* The kubebuilder book +* [Kubernetes API Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md) +* The [controller-runtime docs](https://pkg.go.dev/sigs.k8s.io/controller-runtime) +* The [controller-runtime docs about logging](https://github.com/kubernetes-sigs/controller-runtime/blob/master/TMP-LOGGING.md) +* The nginx-ingress-operator source code +* The kubernetes-operator channel on the kubernetes slack + + +### Recommended dev tools + +* gopls editor integration diff --git a/src/core/operator/api/v1/account_types.go b/src/core/operator/api/v1/account_types.go new file mode 100644 index 00000000..3daee977 --- /dev/null +++ b/src/core/operator/api/v1/account_types.go @@ -0,0 +1,65 @@ +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NOTE: Run "make" to regenerate code after modifying this file. + +// AccountSpec defines the desired state of Account +type AccountSpec struct { + // The docker image registry to pull images from. Empty implies dockerhub. 
+ ImageRegistry string `json:"imageRegistry"` + + // +optional + ImagePullSecret string `json:"imagePullSecret,omitempty"` + + // A map of the docker images used. Maps image names to tags (versions). + Images map[string]string `json:"images"` + + // Map the name of a config to the name of a resource containing its current value. + Configs map[string]string `json:"configs"` +} + +// AccountStatus defines the observed state of Account +type AccountStatus struct { +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// Account is the Schema for the accounts API +type Account struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec AccountSpec `json:"spec,omitempty"` + Status AccountStatus `json:"status,omitempty"` +} + +func (a *Account) ImageName(img string) string { + registry := a.Spec.ImageRegistry + tag := "latest" // TODO: Might not want to fallback to latest. + if t, found := a.Spec.Images[img]; found { + tag = t + } + if registry != "" { + registry += "/" + } + return fmt.Sprintf("%s%s:%s", registry, img, tag) +} + +//+kubebuilder:object:root=true + +// AccountList contains a list of Account +type AccountList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []Account `json:"items"` +} + +func init() { + SchemeBuilder.Register(&Account{}, &AccountList{}) +} diff --git a/src/core/operator/api/v1/groupversion_info.go b/src/core/operator/api/v1/groupversion_info.go new file mode 100644 index 00000000..3cfaf772 --- /dev/null +++ b/src/core/operator/api/v1/groupversion_info.go @@ -0,0 +1,20 @@ +// Package v1 contains API Schema definitions for the v1 API group +// +kubebuilder:object:generate=true +// +groupName=datacoves.com +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "datacoves.com", Version: "v1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/src/core/operator/api/v1/helmrelease_types.go b/src/core/operator/api/v1/helmrelease_types.go new file mode 100644 index 00000000..99a1aa9e --- /dev/null +++ b/src/core/operator/api/v1/helmrelease_types.go @@ -0,0 +1,57 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "helm.sh/helm/v3/pkg/release" +) + +// Important: Run "make" to regenerate code after modifying this file + +// HelmReleaseSpec defines the desired state of HelmRelease +type HelmReleaseSpec struct { + // The helm repo url (e.g. https://airflow.apache.org) + RepoURL string `json:"repoURL"` + + // A name for the helm repo (e.g. apache-airflow) + RepoName string `json:"repoName"` + + // The chart to install (e.g. apache-airflow/airflow) + Chart string `json:"chart"` + + // The version of the chart to install (e.g. 1.3.0) + Version string `json:"version"` + + // The name of a secret containing the values.yaml to configure the chart. 
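The Account.ImageName helper above resolves a full image reference from the account's registry and pinned image tags, falling back to the "latest" tag when an image is not pinned (the TODO in the source notes that this fallback may not be desirable). A minimal standalone sketch of the same fallback logic; the function and sample names here are illustrative only:

```go
package main

import "fmt"

// resolveImage mirrors the registry/tag fallback in Account.ImageName:
// look the image up in the pinned-tags map, default to "latest", and only
// prefix the registry when one is configured (empty means Docker Hub).
func resolveImage(registry string, pinned map[string]string, img string) string {
	tag := "latest"
	if t, found := pinned[img]; found {
		tag = t
	}
	if registry != "" {
		registry += "/"
	}
	return fmt.Sprintf("%s%s:%s", registry, img, tag)
}

func main() {
	pinned := map[string]string{"datacovesprivate/operator": "0.3.0"}
	fmt.Println(resolveImage("registry.example.com", pinned, "datacovesprivate/operator")) // registry.example.com/datacovesprivate/operator:0.3.0
	fmt.Println(resolveImage("", pinned, "datacovesprivate/unpinned"))                     // datacovesprivate/unpinned:latest
}
```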
+ ValuesName string `json:"valuesName"` +} + +// HelmReleaseStatus defines the observed state of HelmRelease +type HelmReleaseStatus struct { + Status release.Status `json:"status"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// HelmRelease is the Schema for the helmreleases API +type HelmRelease struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec HelmReleaseSpec `json:"spec,omitempty"` + Status HelmReleaseStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// HelmReleaseList contains a list of HelmRelease +type HelmReleaseList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []HelmRelease `json:"items"` +} + +func init() { + SchemeBuilder.Register(&HelmRelease{}, &HelmReleaseList{}) +} diff --git a/src/core/operator/api/v1/user_types.go b/src/core/operator/api/v1/user_types.go new file mode 100644 index 00000000..ff55eda0 --- /dev/null +++ b/src/core/operator/api/v1/user_types.go @@ -0,0 +1,208 @@ +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +/* + * NOTE: Run "make" to regenerate code after modifying this file. + * + * ALSO: remember to edit the 'Equals' function immediately below this + * structure. + */ + +// UserSpec defines the desired state of User +type UserSpec struct { + // The user's email. + Email string `json:"email"` + + // The user's name + Name string `json:"name"` + + // A slug used in urls. + // +kubebuilder:validation:Pattern=[a-z]([-a-z0-9]*[a-z0-9])? + Slug string `json:"slug"` + + // The permissions to have access to the workspace services + Permissions []PermissionSpec `json:"permissions"` + + // The user's secret name + SecretName string `json:"secretName"` + + // A map of the docker images used. Maps image names to tags (versions). + Images map[string]string `json:"images"` + + // Dbt project home path (code_server) + // +optional + DbtHome string `json:"dbtHome,omitempty"` + + // Code server profile name + // +optional + Profile string `json:"profile,omitempty"` + + // Clone git repository (code_server) + // +optional + CloneRepository string `json:"cloneRepository,omitempty"` + + // Disable code server + // +optional + CodeServerDisabled string `json:"codeServerDisabled,omitempty"` + + // Enable local airflow server as part of code server + // +optional + LocalAirflowEnabled string `json:"localAirflowEnabled,omitempty"` + + // Code server access + // +optional + CodeServerAccess string `json:"codeServerAccess,omitempty"` + + // Code server access + // +optional + CodeServerShareCode string `json:"codeServerShareCode,omitempty"` + + // A map of code server services. + // +optional + CodeServerExposures map[string]ServiceOptions `json:"codeServerExposures"` + + // Disable local dbt docs (code_server) + // +optional + LocalDbtDocsDisabled string `json:"localDbtDocsDisabled,omitempty"` + + // Disable dbt sync server (code_server) + // +optional + DbtSyncServerDisabled string `json:"dbtSyncServerDisabled,omitempty"` + + // Restart annotation (code_server) + // +optional + CodeServerRestartedAt string `json:"codeServerRestartedAt,omitempty"` + + // A map of enabled services. 
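Taken together, the HelmReleaseSpec fields above say a release is fully described by a chart repo, a chart/version pair, and the name of a secret holding its values.yaml. A short sketch of a spec literal built from the example values given in the field comments; it assumes the module path datacoves.com/operator declared in the PROJECT file, and the secret name "airflow-values" is hypothetical:

```go
package main

import (
	"fmt"

	v1 "datacoves.com/operator/api/v1"
)

func main() {
	// Example values taken from the HelmReleaseSpec field comments above.
	spec := v1.HelmReleaseSpec{
		RepoURL:    "https://airflow.apache.org",
		RepoName:   "apache-airflow",
		Chart:      "apache-airflow/airflow",
		Version:    "1.3.0",
		ValuesName: "airflow-values", // hypothetical secret name
	}
	fmt.Printf("install %s %s from %s using values secret %s\n",
		spec.Chart, spec.Version, spec.RepoURL, spec.ValuesName)
}
```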
+ Services map[string]ServiceOptions `json:"services"` + + // Environment variables for local airflow + // +optional + LocalAirflowEnvironment []NameValuePair `json:"localAirflowEnvironment,omitempty"` +} + +type NameValuePair struct { + Name string `json:"name"` + Value string `json:"value"` +} + +func (u *UserSpec) Equals(v UserSpec) bool { + /* + * NOTE NOTE NOTE: This must be edited when adding fields to the + * above structure or the operator won't pick up the changes. + */ + if u.Email != v.Email || u.Slug != v.Slug || u.DbtHome != v.DbtHome || u.Profile != v.Profile || + u.SecretName != v.SecretName || u.LocalDbtDocsDisabled != v.LocalDbtDocsDisabled || + u.DbtSyncServerDisabled != v.DbtSyncServerDisabled || u.CloneRepository != v.CloneRepository || u.CodeServerDisabled != v.CodeServerDisabled || + u.CodeServerAccess != v.CodeServerAccess || u.CodeServerShareCode != v.CodeServerShareCode || u.CodeServerRestartedAt != v.CodeServerRestartedAt || + u.LocalAirflowEnabled != v.LocalAirflowEnabled { + return false + } + if len(u.Permissions) != len(v.Permissions) { + return false + } + for i, up := range u.Permissions { + vp := v.Permissions[i] + if up != vp { + return false + } + } + if len(u.Images) != len(v.Images) { + return false + } + for ik, ui := range u.Images { + vi, exists := v.Images[ik] + if !exists || ui != vi { + return false + } + } + if len(u.CodeServerExposures) != len(v.CodeServerExposures) { + return false + } + for ik, us := range u.CodeServerExposures { + vi, exists := v.CodeServerExposures[ik] + if !exists || us["port"] != vi["port"] { + return false + } + } + + // Check environment + if len(u.LocalAirflowEnvironment) != len(v.LocalAirflowEnvironment) { + return false + } + + /* + * This assumes environment variables will always show up in the same + * order in the array. However, because we are generating them + * programatically, that should always be the case (unless the array + * changes) + */ + for i, up := range u.LocalAirflowEnvironment { + vp := v.LocalAirflowEnvironment[i] + + if up.Name != vp.Name || up.Value != vp.Value { + return false + } + } + + return true +} + +func (u *UserSpec) HasPermissionForService(serviceName string) bool { + for _, permission := range u.Permissions { + if permission.Service == serviceName { + return true + } + } + return false +} + +func (u *UserSpec) CodeServerEnabled() bool { + return u.CodeServerDisabled != "true" +} + +func (u *UserSpec) IsLocalAirflowEnabled() bool { + return u.LocalAirflowEnabled == "true" +} + +// PermissionSpec defines the desired state of User permissions +type PermissionSpec struct { + // Service prefix, i.e. "airbyte" + Service string `json:"service"` + + // Resources, i.e. 
"/subpath, read https://www.pomerium.com/reference/#prefix" + // +optional + Path string `json:"path,omitempty"` +} + +// UserStatus defines the observed state of User +type UserStatus struct { +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// User is the Schema for the users API +type User struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec UserSpec `json:"spec,omitempty"` + Status UserStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// UserList contains a list of User +type UserList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []User `json:"items"` +} + +func init() { + SchemeBuilder.Register(&User{}, &UserList{}) +} diff --git a/src/core/operator/api/v1/workspace_types.go b/src/core/operator/api/v1/workspace_types.go new file mode 100644 index 00000000..6403a71c --- /dev/null +++ b/src/core/operator/api/v1/workspace_types.go @@ -0,0 +1,318 @@ +package v1 + +import ( + "fmt" + "strings" + + core "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// NOTE: Run "make" to regenerate code after modifying this file. + +// WorkspaceSpec defines the desired state of Workspace +type WorkspaceSpec struct { + // The workspace's account name. + Account string `json:"account"` + + // The workspace's project name. + Project string `json:"project"` + + // Is account suspended? + // +optional + AccountSuspended string `json:"accountSuspended,omitempty"` + + // The domain of the cluster. Used to derive ingress rules. + ClusterDomain string `json:"clusterDomain"` + + // The name of a cert-manager issuer to put on ingress annotations, for SSL. + // +optional + CertManagerIssuer string `json:"certManagerIssuer,omitempty"` + + // An URL to put on ingress annotations so that external-dns creates DNS records. + // +optional + ExternalDnsUrl string `json:"externalDnsUrl,omitempty"` + + // The IP of the internal-dns so that network policies allow communication with it. + // +optional + InternalDnsIp string `json:"internalDnsIp,omitempty"` + + // The cluster internal IP + // +optional + InternalIp string `json:"internalIp,omitempty"` + + // The cluster external IP + // +optional + ExternalIp string `json:"externalIp,omitempty"` + + // The IP of the internal-dns so that network policies allow communication with it. + // +optional + ClusterApiServerIps ClusterApiServerIps `json:"clusterApiServerIps,omitempty"` + + // The IP of the internal-dns so that network policies allow communication with it. + // +optional + InternalDbClusterIpRange string `json:"internalDbClusterIpRange,omitempty"` + + // The docker image registry to pull images from. Empty implies dockerhub. + ImageRegistry string `json:"imageRegistry"` + + // +optional + ImagePullSecret string `json:"imagePullSecret,omitempty"` + + // A map of the docker images used. Maps image names to tags (versions). + Images map[string]string `json:"images"` + + // Release profile name + ReleaseProfile string `json:"releaseProfile,omitempty"` + + // A map of enabled services. + Services map[string]ServiceOptions `json:"services"` + + // A map of enabled internal services. + InternalServices map[string]ServiceOptions `json:"internalServices"` + + // A map of service helm charts configs + Charts map[string]ChartConfigs `json:"charts"` + + // The workspace's users. 
+	Users []UserSpec `json:"users"`
+
+	// The oidc token attribute used to identify the user
+	OidcUserId string `json:"oidcUserId"`
+
+	// Map the name of a config to the name of a resource containing its current value.
+	Configs map[string]string `json:"configs"`
+
+	// The ssh git url to the project repository
+	// +optional
+	SshGitRepo string `json:"sshGitRepo,omitempty"`
+
+	// The http git url to the project repository
+	// +optional
+	HttpGitRepo string `json:"httpGitRepo,omitempty"`
+
+	// The git clone strategy, either 'ssh_clone' or 'http_clone'
+	GitCloneStrategy string `json:"gitCloneStrategy"`
+
+	// Clone git repository (code_server)
+	// +optional
+	CloneRepository string `json:"cloneRepository,omitempty"`
+
+	// Runs python web apps in development mode (code_server)
+	// +optional
+	DontUseWsgi string `json:"dontUseWsgi,omitempty"`
+
+	// If true, we will use the node local DNS for all pods. The cluster
+	// must be installed with the install_node_local_dns option set to true
+	// in the cluster-params yaml
+	// +optional
+	NodeLocalDnsEnabled string `json:"nodeLocalDnsEnabled,omitempty"`
+
+	// Dbt project home path (code_server)
+	// +optional
+	DbtHome string `json:"dbtHome,omitempty"`
+
+	// Dbt docs git branch (dbt_docs)
+	// +optional
+	DbtDocsGitBranch string `json:"dbtDocsGitBranch,omitempty"`
+
+	// Dbt docs AskPass URL if using Azure (dbt_docs)
+	// +optional
+	DbtDocsAskpassUrl string `json:"dbtDocsAskpassUrl,omitempty"`
+
+	// Disable local dbt docs (code_server)
+	// +optional
+	LocalDbtDocsDisabled string `json:"localDbtDocsDisabled,omitempty"`
+
+	// Disable dbt sync server (code_server)
+	// +optional
+	DbtSyncServerDisabled string `json:"dbtSyncServerDisabled,omitempty"`
+
+	// +optional
+	ResourceRequirements map[string]core.ResourceRequirements `json:"resourceRequirements,omitempty"`
+}
+
+type ServiceOptions map[string]string
+
+type ChartConfigs map[string]string
+
+// WorkspaceStatus defines the observed state of Workspace
+type WorkspaceStatus struct {
+}
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+
+// Workspace is the Schema for the workspaces API
+type Workspace struct {
+	metav1.TypeMeta   `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec   WorkspaceSpec   `json:"spec,omitempty"`
+	Status WorkspaceStatus `json:"status,omitempty"`
+}
+
+/*
+ * This adds a DNS spec to a given PodSpec if it is necessary (i.e. if
+ * the workspace's Spec.NodeLocalDnsEnabled is "true").
+ */
+func (w *Workspace) AddDnsToPodSpecIfNeeded(spec *core.PodSpec) {
+	if w.NodeLocalDnsEnabled() {
+		spec.DNSPolicy = core.DNSNone
+		spec.DNSConfig = &core.PodDNSConfig{
+			Nameservers: []string{
+				"169.254.20.25",
+				"10.96.0.10",
+			},
+			Searches: []string{
+				"core.svc.cluster.local",
+				"svc.cluster.local",
+				"cluster.local",
+			},
+			Options: []core.PodDNSConfigOption{
+				{
+					Name:  "ndots",
+					Value: &[]string{"1"}[0],
+				},
+				{
+					Name:  "attempts",
+					Value: &[]string{"5"}[0],
+				},
+				{
+					Name:  "timeout",
+					Value: &[]string{"5"}[0],
+				},
+			},
+		}
+	}
+}
+
+func (w *Workspace) ImageName(img string) string {
+	registry := w.Spec.ImageRegistry
+	tag := "latest" // TODO: Might not want to fall back to latest.
+	if t, found := w.Spec.Images[img]; found {
+		tag = t
+	}
+	if registry != "" {
+		registry += "/"
+	}
+	return fmt.Sprintf("%s%s:%s", registry, img, tag)
+}
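+
+/*
+ * Worked examples for ImageName and ProfileImageName (defined below). The
+ * registry, profile and tag values are hypothetical and only illustrate the
+ * lookup rules:
+ *
+ *	w.Spec.ImageRegistry = "registry.example.com"
+ *	w.Spec.ReleaseProfile = "dbt-snowflake"
+ *	w.Spec.Images = map[string]string{"airflow/pi3-airflow": "2.0.1"}
+ *
+ *	w.ImageName("datacoves/unknown")
+ *		// "registry.example.com/datacoves/unknown:latest" (no tag found, falls back to "latest")
+ *	w.ProfileImageName("airflow/airflow")
+ *		// "registry.example.com/airflow/pi3-airflow:2.0.1" (matches the "pi{profile_id}" variant)
+ *	w.ProfileImageName("airflow/other")
+ *		// "registry.example.com/airflow/other-dbt-snowflake:latest" (no profile image, ReleaseProfile fallback)
+ */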
+
+/*
+ * Finds a "profile image". Given an img like "path/name", a profile image will
+ * have the form "path/pi{profile_id}-name". This function assumes there's only
+ * one such image in w.Images (workspace.py will make it so). If there's more
+ * it will return the first it finds. We don't know and we don't care what the
+ * profile id is from the operator.
+ *
+ * If it doesn't find it in the profile images, it will use the ReleaseProfile
+ * string instead, in such a fashion:
+ *
+ * "path/name-{ReleaseProfile}" where release profile will likely be
+ * something such as "dbt-snowflake"
+ *
+ * optional_suffix is an optional parameter; if provided, it will be appended
+ * to the end of the profile name. It is intended for use with local versions
+ * of things, i.e. airflow-airflow-dbt-snowflake-local, etc.
+ */
+func (w *Workspace) ProfileImageName(img string, optional_suffix ...string) string {
+	prefix := "pi"
+	suffix := ""
+	contains := "-" + img
+
+	if len(optional_suffix) > 0 {
+		suffix = "-" + optional_suffix[0]
+	}
+
+	if iSlash := strings.LastIndexByte(img, '/'); iSlash >= 0 {
+		prefix = img[:iSlash] + "/pi"
+		contains = "-" + img[iSlash+1:] + suffix
+	}
+	registry := w.Spec.ImageRegistry
+	tag := ""
+	for imageName, imageTag := range w.Spec.Images {
+		if !strings.Contains(imageName, contains) || !strings.HasPrefix(imageName, prefix) {
+			continue
+		}
+		// TODO: Check that the remaining bit is numeric. Assuming it for now.
+		img, tag = imageName, imageTag
+		break
+	}
+	if tag == "" {
+		// Not found, fallback.
+		return w.ImageName(img + "-" + w.Spec.ReleaseProfile + suffix)
+	}
+	if registry != "" {
+		registry += "/"
+	}
+	return fmt.Sprintf("%s%s:%s", registry, img, tag)
+}
+
+func (w *Workspace) WorkbenchDomain() string {
+	return fmt.Sprintf("%s.%s", w.Name, w.Spec.ClusterDomain)
+}
+
+func (w *Workspace) ServiceEnabled(serviceName string) bool {
+	// built-ins are never disabled so users can still load the environment's workbench
+	if serviceName == "pomerium" || serviceName == "workbench" {
+		return true
+	}
+	if w.Spec.AccountSuspended == "true" {
+		return false
+	}
+	options, found := w.Spec.Services[serviceName]
+	return found && options["enabled"] != "false" && options["valid"] != "false"
+}
+
+func (w *Workspace) InternalServiceEnabled(serviceName string) bool {
+	if w.Spec.AccountSuspended == "true" {
+		return false
+	}
+	options, found := w.Spec.InternalServices[serviceName]
+	return found && options["enabled"] != "false" && options["valid"] != "false"
+}
+
+func (w *Workspace) NodeLocalDnsEnabled() bool {
+	return w.Spec.NodeLocalDnsEnabled == "true"
+}
+
+func (w *Workspace) DontUseWsgi() bool {
+	return w.Spec.DontUseWsgi == "true"
+}
+
+func (w *Workspace) LocalDbtDocs() bool {
+	return w.Spec.LocalDbtDocsDisabled != "true"
+}
+
+func (w *Workspace) DbtSync() bool {
+	return w.Spec.DbtSyncServerDisabled != "true"
+}
+
+func (w *Workspace) HPA() bool {
+	return len(w.Spec.ResourceRequirements) > 0
+}
+
+//+kubebuilder:object:root=true
+
+// WorkspaceList contains a list of Workspace
+type WorkspaceList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items           []Workspace `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&Workspace{}, &WorkspaceList{})
+}
+
+// NOTE: Run "make" to regenerate code after modifying this file.
+
+// ApiServerSpec: kubectl get endpoints --namespace default kubernetes
+type ClusterApiServerIps struct {
+	// Kubernetes API server IPs.
+	Ips []string `json:"ips"`
+
+	// Kubernetes API server ports.
+ Ports []int32 `json:"ports"` +} diff --git a/src/core/operator/api/v1/zz_generated.deepcopy.go b/src/core/operator/api/v1/zz_generated.deepcopy.go new file mode 100644 index 00000000..02774494 --- /dev/null +++ b/src/core/operator/api/v1/zz_generated.deepcopy.go @@ -0,0 +1,609 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +// Code generated by controller-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Account) DeepCopyInto(out *Account) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Account. +func (in *Account) DeepCopy() *Account { + if in == nil { + return nil + } + out := new(Account) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Account) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountList) DeepCopyInto(out *AccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Account, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountList. +func (in *AccountList) DeepCopy() *AccountList { + if in == nil { + return nil + } + out := new(AccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountSpec) DeepCopyInto(out *AccountSpec) { + *out = *in + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Configs != nil { + in, out := &in.Configs, &out.Configs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountSpec. +func (in *AccountSpec) DeepCopy() *AccountSpec { + if in == nil { + return nil + } + out := new(AccountSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AccountStatus) DeepCopyInto(out *AccountStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AccountStatus. +func (in *AccountStatus) DeepCopy() *AccountStatus { + if in == nil { + return nil + } + out := new(AccountStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in ChartConfigs) DeepCopyInto(out *ChartConfigs) { + { + in := &in + *out = make(ChartConfigs, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChartConfigs. +func (in ChartConfigs) DeepCopy() ChartConfigs { + if in == nil { + return nil + } + out := new(ChartConfigs) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterApiServerIps) DeepCopyInto(out *ClusterApiServerIps) { + *out = *in + if in.Ips != nil { + in, out := &in.Ips, &out.Ips + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterApiServerIps. +func (in *ClusterApiServerIps) DeepCopy() *ClusterApiServerIps { + if in == nil { + return nil + } + out := new(ClusterApiServerIps) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmRelease) DeepCopyInto(out *HelmRelease) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmRelease. +func (in *HelmRelease) DeepCopy() *HelmRelease { + if in == nil { + return nil + } + out := new(HelmRelease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmRelease) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmReleaseList) DeepCopyInto(out *HelmReleaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HelmRelease, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmReleaseList. +func (in *HelmReleaseList) DeepCopy() *HelmReleaseList { + if in == nil { + return nil + } + out := new(HelmReleaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HelmReleaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HelmReleaseSpec) DeepCopyInto(out *HelmReleaseSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmReleaseSpec. +func (in *HelmReleaseSpec) DeepCopy() *HelmReleaseSpec { + if in == nil { + return nil + } + out := new(HelmReleaseSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HelmReleaseStatus) DeepCopyInto(out *HelmReleaseStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HelmReleaseStatus. +func (in *HelmReleaseStatus) DeepCopy() *HelmReleaseStatus { + if in == nil { + return nil + } + out := new(HelmReleaseStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NameValuePair) DeepCopyInto(out *NameValuePair) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NameValuePair. +func (in *NameValuePair) DeepCopy() *NameValuePair { + if in == nil { + return nil + } + out := new(NameValuePair) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PermissionSpec) DeepCopyInto(out *PermissionSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PermissionSpec. +func (in *PermissionSpec) DeepCopy() *PermissionSpec { + if in == nil { + return nil + } + out := new(PermissionSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ServiceOptions) DeepCopyInto(out *ServiceOptions) { + { + in := &in + *out = make(ServiceOptions, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceOptions. +func (in ServiceOptions) DeepCopy() ServiceOptions { + if in == nil { + return nil + } + out := new(ServiceOptions) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *User) DeepCopyInto(out *User) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new User. +func (in *User) DeepCopy() *User { + if in == nil { + return nil + } + out := new(User) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *User) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserList) DeepCopyInto(out *UserList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]User, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserList. +func (in *UserList) DeepCopy() *UserList { + if in == nil { + return nil + } + out := new(UserList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *UserList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserSpec) DeepCopyInto(out *UserSpec) { + *out = *in + if in.Permissions != nil { + in, out := &in.Permissions, &out.Permissions + *out = make([]PermissionSpec, len(*in)) + copy(*out, *in) + } + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.CodeServerExposures != nil { + in, out := &in.CodeServerExposures, &out.CodeServerExposures + *out = make(map[string]ServiceOptions, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ServiceOptions, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make(map[string]ServiceOptions, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ServiceOptions, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } + if in.LocalAirflowEnvironment != nil { + in, out := &in.LocalAirflowEnvironment, &out.LocalAirflowEnvironment + *out = make([]NameValuePair, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserSpec. +func (in *UserSpec) DeepCopy() *UserSpec { + if in == nil { + return nil + } + out := new(UserSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserStatus) DeepCopyInto(out *UserStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserStatus. +func (in *UserStatus) DeepCopy() *UserStatus { + if in == nil { + return nil + } + out := new(UserStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Workspace) DeepCopyInto(out *Workspace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Workspace. +func (in *Workspace) DeepCopy() *Workspace { + if in == nil { + return nil + } + out := new(Workspace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Workspace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *WorkspaceList) DeepCopyInto(out *WorkspaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Workspace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceList. +func (in *WorkspaceList) DeepCopy() *WorkspaceList { + if in == nil { + return nil + } + out := new(WorkspaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WorkspaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceSpec) DeepCopyInto(out *WorkspaceSpec) { + *out = *in + in.ClusterApiServerIps.DeepCopyInto(&out.ClusterApiServerIps) + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make(map[string]ServiceOptions, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ServiceOptions, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } + if in.InternalServices != nil { + in, out := &in.InternalServices, &out.InternalServices + *out = make(map[string]ServiceOptions, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ServiceOptions, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } + if in.Charts != nil { + in, out := &in.Charts, &out.Charts + *out = make(map[string]ChartConfigs, len(*in)) + for key, val := range *in { + var outVal map[string]string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ChartConfigs, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + (*out)[key] = outVal + } + } + if in.Users != nil { + in, out := &in.Users, &out.Users + *out = make([]UserSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Configs != nil { + in, out := &in.Configs, &out.Configs + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ResourceRequirements != nil { + in, out := &in.ResourceRequirements, &out.ResourceRequirements + *out = make(map[string]corev1.ResourceRequirements, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceSpec. +func (in *WorkspaceSpec) DeepCopy() *WorkspaceSpec { + if in == nil { + return nil + } + out := new(WorkspaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WorkspaceStatus) DeepCopyInto(out *WorkspaceStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WorkspaceStatus. 
+func (in *WorkspaceStatus) DeepCopy() *WorkspaceStatus { + if in == nil { + return nil + } + out := new(WorkspaceStatus) + in.DeepCopyInto(out) + return out +} diff --git a/src/core/operator/cmd/fake_helm/fake_helm.go b/src/core/operator/cmd/fake_helm/fake_helm.go new file mode 100644 index 00000000..6e2fbae9 --- /dev/null +++ b/src/core/operator/cmd/fake_helm/fake_helm.go @@ -0,0 +1,62 @@ +package main + +import ( + "crypto/rand" + "fmt" + "os" + "time" +) + +func main() { + fmt.Println("args", os.Args[1:]) + cmd := "" + for i := 1; i < len(os.Args); i++ { + switch os.Args[i] { + case "upgrade", "uninstall": + cmd = os.Args[i] + break + case "repo": + if i+1 < len(os.Args) && os.Args[i+1] == "update" { + cmd = "repo update" + } else { + cmd = "repo add" + } + break + } + } + + const MB = 1024 * 1024 + var allocBytes int + var sleepSeconds int + + switch cmd { + case "upgrade": + sleepSeconds = 30 + allocBytes = 20 * MB + case "uninstall": + sleepSeconds = 10 + allocBytes = 20 * MB + case "repo update": + sleepSeconds = 7 + allocBytes = 2 * MB + case "repo add": + sleepSeconds = 1 + allocBytes = 1 * MB + default: + return + } + + b := make([]byte, allocBytes) + _, err := rand.Read(b) + if err != nil { + fmt.Println(err) + os.Exit(1) + } + time.Sleep(time.Duration(sleepSeconds) * time.Second) + + checksum := uint32(0) + for i := 0; i < allocBytes; i++ { + checksum += uint32(b[i]) + } + fmt.Println("done", checksum, os.Args[1:]) +} diff --git a/src/core/operator/config/crd/bases/datacoves.com_accounts.yaml b/src/core/operator/config/crd/bases/datacoves.com_accounts.yaml new file mode 100644 index 00000000..a730f8b2 --- /dev/null +++ b/src/core/operator/config/crd/bases/datacoves.com_accounts.yaml @@ -0,0 +1,74 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: accounts.datacoves.com +spec: + group: datacoves.com + names: + kind: Account + listKind: AccountList + plural: accounts + singular: account + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Account is the Schema for the accounts API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: AccountSpec defines the desired state of Account + properties: + configs: + additionalProperties: + type: string + description: Map the name of a config to the name of a resource containing + its current value. + type: object + imagePullSecret: + type: string + imageRegistry: + description: The docker image registry to pull images from. Empty + implies dockerhub. + type: string + images: + additionalProperties: + type: string + description: A map of the docker images used. Maps image names to + tags (versions). 
+ type: object + required: + - configs + - imageRegistry + - images + type: object + status: + description: AccountStatus defines the observed state of Account + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/src/core/operator/config/crd/bases/datacoves.com_helmreleases.yaml b/src/core/operator/config/crd/bases/datacoves.com_helmreleases.yaml new file mode 100644 index 00000000..4169feab --- /dev/null +++ b/src/core/operator/config/crd/bases/datacoves.com_helmreleases.yaml @@ -0,0 +1,80 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: helmreleases.datacoves.com +spec: + group: datacoves.com + names: + kind: HelmRelease + listKind: HelmReleaseList + plural: helmreleases + singular: helmrelease + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: HelmRelease is the Schema for the helmreleases API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: HelmReleaseSpec defines the desired state of HelmRelease + properties: + chart: + description: The chart to install (e.g. apache-airflow/airflow) + type: string + repoName: + description: A name for the helm repo (e.g. apache-airflow) + type: string + repoURL: + description: The helm repo url (e.g. https://airflow.apache.org) + type: string + valuesName: + description: The name of a secret containing the values.yaml to configure + the chart. + type: string + version: + description: The version of the chart to install (e.g. 
1.3.0) + type: string + required: + - chart + - repoName + - repoURL + - valuesName + - version + type: object + status: + description: HelmReleaseStatus defines the observed state of HelmRelease + properties: + status: + description: Status is the status of a release + type: string + required: + - status + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/src/core/operator/config/crd/bases/datacoves.com_users.yaml b/src/core/operator/config/crd/bases/datacoves.com_users.yaml new file mode 100644 index 00000000..be2ca5c1 --- /dev/null +++ b/src/core/operator/config/crd/bases/datacoves.com_users.yaml @@ -0,0 +1,151 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: users.datacoves.com +spec: + group: datacoves.com + names: + kind: User + listKind: UserList + plural: users + singular: user + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: User is the Schema for the users API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: UserSpec defines the desired state of User + properties: + cloneRepository: + description: Clone git repository (code_server) + type: string + codeServerAccess: + description: Code server access + type: string + codeServerDisabled: + description: Disable code server + type: string + codeServerExposures: + additionalProperties: + additionalProperties: + type: string + type: object + description: A map of code server services. + type: object + codeServerRestartedAt: + description: Restart annotation (code_server) + type: string + codeServerShareCode: + description: Code server access + type: string + dbtHome: + description: Dbt project home path (code_server) + type: string + dbtSyncServerDisabled: + description: Disable dbt sync server (code_server) + type: string + email: + description: The user's email. + type: string + images: + additionalProperties: + type: string + description: A map of the docker images used. Maps image names to + tags (versions). 
+ type: object + localAirflowEnabled: + description: Enable local airflow server as part of code server + type: string + localAirflowEnvironment: + description: Environment variables for local airflow + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + localDbtDocsDisabled: + description: Disable local dbt docs (code_server) + type: string + name: + description: The user's name + type: string + permissions: + description: The permissions to have access to the workspace services + items: + description: PermissionSpec defines the desired state of User permissions + properties: + path: + description: Resources, i.e. "/subpath, read https://www.pomerium.com/reference/#prefix" + type: string + service: + description: Service prefix, i.e. "airbyte" + type: string + required: + - service + type: object + type: array + profile: + description: Code server profile name + type: string + secretName: + description: The user's secret name + type: string + services: + additionalProperties: + additionalProperties: + type: string + type: object + description: A map of enabled services. + type: object + slug: + description: A slug used in urls. + pattern: '[a-z]([-a-z0-9]*[a-z0-9])?' + type: string + required: + - email + - images + - name + - permissions + - secretName + - services + - slug + type: object + status: + description: UserStatus defines the observed state of User + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/src/core/operator/config/crd/bases/datacoves.com_workspaces.yaml b/src/core/operator/config/crd/bases/datacoves.com_workspaces.yaml new file mode 100644 index 00000000..18c752d6 --- /dev/null +++ b/src/core/operator/config/crd/bases/datacoves.com_workspaces.yaml @@ -0,0 +1,336 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.8.0 + creationTimestamp: null + name: workspaces.datacoves.com +spec: + group: datacoves.com + names: + kind: Workspace + listKind: WorkspaceList + plural: workspaces + singular: workspace + scope: Namespaced + versions: + - name: v1 + schema: + openAPIV3Schema: + description: Workspace is the Schema for the workspaces API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: WorkspaceSpec defines the desired state of Workspace + properties: + account: + description: The workspace's account name. + type: string + accountSuspended: + description: Is account suspended? + type: string + certManagerIssuer: + description: The name of a cert-manager issuer to put on ingress annotations, + for SSL. 
+ type: string + charts: + additionalProperties: + additionalProperties: + type: string + type: object + description: A map of service helm charts configs + type: object + cloneRepository: + description: Clone git repository (code_server) + type: string + clusterApiServerIps: + description: The IP of the internal-dns so that network policies allow + communication with it. + properties: + ips: + description: Kubernates api server ips. + items: + type: string + type: array + ports: + description: Kubernates api server port. + items: + format: int32 + type: integer + type: array + required: + - ips + - ports + type: object + clusterDomain: + description: The domain of the cluster. Used to derive ingress rules. + type: string + configs: + additionalProperties: + type: string + description: Map the name of a config to the name of a resource containing + its current value. + type: object + dbtDocsAskpassUrl: + description: Dbt docs AskPass URL if using Azure (dbt_docs) + type: string + dbtDocsGitBranch: + description: Dbt docs git branch (dbt_docs) + type: string + dbtHome: + description: Dbt project home path (code_server) + type: string + dbtSyncServerDisabled: + description: Disable dbt sync server (code_server) + type: string + dontUseWsgi: + description: Runs python web apps in development mode (code_server) + type: string + externalDnsUrl: + description: An URL to put on ingress annotations so that external-dns + creates DNS records. + type: string + externalIp: + description: The cluster external IP + type: string + gitCloneStrategy: + description: The git clone strategy, could be either 'ssh_clone' or + 'http_clone' + type: string + httpGitRepo: + description: The http git url to the project repository + type: string + imagePullSecret: + type: string + imageRegistry: + description: The docker image registry to pull images from. Empty + implies dockerhub. + type: string + images: + additionalProperties: + type: string + description: A map of the docker images used. Maps image names to + tags (versions). + type: object + internalDbClusterIpRange: + description: The IP of the internal-dns so that network policies allow + communication with it. + type: string + internalDnsIp: + description: The IP of the internal-dns so that network policies allow + communication with it. + type: string + internalIp: + description: The cluster internal IP + type: string + internalServices: + additionalProperties: + additionalProperties: + type: string + type: object + description: A map of enabled internal services. + type: object + localDbtDocsDisabled: + description: Disable local dbt docs (code_server) + type: string + nodeLocalDnsEnabled: + description: If true, we will use the node local DNS for all pods. The + cluster must be installed with the install_node_local_dns option + set to true in the cluster-params yaml + type: string + oidcUserId: + description: The oidc token attribute used to identify the user + type: string + project: + description: The workspace's project name. + type: string + releaseProfile: + description: Release profile name + type: string + resourceRequirements: + additionalProperties: + description: ResourceRequirements describes the compute resource + requirements. 
+ properties: + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: object + services: + additionalProperties: + additionalProperties: + type: string + type: object + description: A map of enabled services. + type: object + sshGitRepo: + description: The ssh git url to the project repository + type: string + users: + description: The workspace's users. + items: + description: UserSpec defines the desired state of User + properties: + cloneRepository: + description: Clone git repository (code_server) + type: string + codeServerAccess: + description: Code server access + type: string + codeServerDisabled: + description: Disable code server + type: string + codeServerExposures: + additionalProperties: + additionalProperties: + type: string + type: object + description: A map of code server services. + type: object + codeServerRestartedAt: + description: Restart annotation (code_server) + type: string + codeServerShareCode: + description: Code server access + type: string + dbtHome: + description: Dbt project home path (code_server) + type: string + dbtSyncServerDisabled: + description: Disable dbt sync server (code_server) + type: string + email: + description: The user's email. + type: string + images: + additionalProperties: + type: string + description: A map of the docker images used. Maps image names + to tags (versions). + type: object + localAirflowEnabled: + description: Enable local airflow server as part of code server + type: string + localAirflowEnvironment: + description: Environment variables for local airflow + items: + properties: + name: + type: string + value: + type: string + required: + - name + - value + type: object + type: array + localDbtDocsDisabled: + description: Disable local dbt docs (code_server) + type: string + name: + description: The user's name + type: string + permissions: + description: The permissions to have access to the workspace + services + items: + description: PermissionSpec defines the desired state of User + permissions + properties: + path: + description: Resources, i.e. "/subpath, read https://www.pomerium.com/reference/#prefix" + type: string + service: + description: Service prefix, i.e. "airbyte" + type: string + required: + - service + type: object + type: array + profile: + description: Code server profile name + type: string + secretName: + description: The user's secret name + type: string + services: + additionalProperties: + additionalProperties: + type: string + type: object + description: A map of enabled services. + type: object + slug: + description: A slug used in urls. 
+ pattern: '[a-z]([-a-z0-9]*[a-z0-9])?' + type: string + required: + - email + - images + - name + - permissions + - secretName + - services + - slug + type: object + type: array + required: + - account + - charts + - clusterDomain + - configs + - gitCloneStrategy + - imageRegistry + - images + - internalServices + - oidcUserId + - project + - services + - users + type: object + status: + description: WorkspaceStatus defines the observed state of Workspace + type: object + type: object + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/src/core/operator/config/crd/kustomization.yaml b/src/core/operator/config/crd/kustomization.yaml new file mode 100644 index 00000000..dbde5931 --- /dev/null +++ b/src/core/operator/config/crd/kustomization.yaml @@ -0,0 +1,30 @@ +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. +# It should be run by config/default +resources: +- bases/datacoves.com_workspaces.yaml +- bases/datacoves.com_users.yaml +- bases/datacoves.com_helmreleases.yaml +- bases/datacoves.com_accounts.yaml +#+kubebuilder:scaffold:crdkustomizeresource + +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +#- patches/webhook_in_workspaces.yaml +#- patches/webhook_in_users.yaml +#- patches/webhook_in_helmreleases.yaml +#- patches/webhook_in_accounts.yaml +#+kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +#- patches/cainjection_in_workspaces.yaml +#- patches/cainjection_in_users.yaml +#- patches/cainjection_in_helmreleases.yaml +#- patches/cainjection_in_accounts.yaml +#+kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. 
+configurations: +- kustomizeconfig.yaml diff --git a/src/core/operator/config/crd/kustomizeconfig.yaml b/src/core/operator/config/crd/kustomizeconfig.yaml new file mode 100644 index 00000000..ec5c150a --- /dev/null +++ b/src/core/operator/config/crd/kustomizeconfig.yaml @@ -0,0 +1,19 @@ +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/src/core/operator/config/crd/patches/cainjection_in_accounts.yaml b/src/core/operator/config/crd/patches/cainjection_in_accounts.yaml new file mode 100644 index 00000000..a77056b6 --- /dev/null +++ b/src/core/operator/config/crd/patches/cainjection_in_accounts.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: accounts.datacoves.com diff --git a/src/core/operator/config/crd/patches/cainjection_in_helmreleases.yaml b/src/core/operator/config/crd/patches/cainjection_in_helmreleases.yaml new file mode 100644 index 00000000..23623abc --- /dev/null +++ b/src/core/operator/config/crd/patches/cainjection_in_helmreleases.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: helmreleases.datacoves.com diff --git a/src/core/operator/config/crd/patches/cainjection_in_users.yaml b/src/core/operator/config/crd/patches/cainjection_in_users.yaml new file mode 100644 index 00000000..0f654021 --- /dev/null +++ b/src/core/operator/config/crd/patches/cainjection_in_users.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: users.datacoves.com diff --git a/src/core/operator/config/crd/patches/cainjection_in_workspaces.yaml b/src/core/operator/config/crd/patches/cainjection_in_workspaces.yaml new file mode 100644 index 00000000..61139a63 --- /dev/null +++ b/src/core/operator/config/crd/patches/cainjection_in_workspaces.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: workspaces.datacoves.com diff --git a/src/core/operator/config/crd/patches/webhook_in_accounts.yaml b/src/core/operator/config/crd/patches/webhook_in_accounts.yaml new file mode 100644 index 00000000..74a421fd --- /dev/null +++ b/src/core/operator/config/crd/patches/webhook_in_accounts.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition 
+metadata: + name: accounts.datacoves.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/src/core/operator/config/crd/patches/webhook_in_helmreleases.yaml b/src/core/operator/config/crd/patches/webhook_in_helmreleases.yaml new file mode 100644 index 00000000..e7f09177 --- /dev/null +++ b/src/core/operator/config/crd/patches/webhook_in_helmreleases.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: helmreleases.datacoves.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/src/core/operator/config/crd/patches/webhook_in_users.yaml b/src/core/operator/config/crd/patches/webhook_in_users.yaml new file mode 100644 index 00000000..1899e453 --- /dev/null +++ b/src/core/operator/config/crd/patches/webhook_in_users.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: users.datacoves.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/src/core/operator/config/crd/patches/webhook_in_workspaces.yaml b/src/core/operator/config/crd/patches/webhook_in_workspaces.yaml new file mode 100644 index 00000000..45250e8a --- /dev/null +++ b/src/core/operator/config/crd/patches/webhook_in_workspaces.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: workspaces.datacoves.com +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/src/core/operator/config/default/kustomization.yaml b/src/core/operator/config/default/kustomization.yaml new file mode 100644 index 00000000..129f58de --- /dev/null +++ b/src/core/operator/config/default/kustomization.yaml @@ -0,0 +1,73 @@ +# Adds namespace to all resources. +namespace: operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: operator- + +# Labels to add to all resources and selectors. +#commonLabels: +# someName: someValue + +bases: +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus + +patchesStrategicMerge: +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. 
+- manager_auth_proxy_patch.yaml
+
+# Mount the controller config file for loading manager configurations
+# through a ComponentConfig type
+#- manager_config_patch.yaml
+
+# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in
+# crd/kustomization.yaml
+#- manager_webhook_patch.yaml
+
+# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'.
+# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks.
+# 'CERTMANAGER' needs to be enabled to use ca injection
+#- webhookcainjection_patch.yaml
+
+# the following config is for teaching kustomize how to do var substitution
+vars:
+# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix.
+#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR
+#  objref:
+#    kind: Certificate
+#    group: cert-manager.io
+#    version: v1
+#    name: serving-cert # this name should match the one in certificate.yaml
+#  fieldref:
+#    fieldpath: metadata.namespace
+#- name: CERTIFICATE_NAME
+#  objref:
+#    kind: Certificate
+#    group: cert-manager.io
+#    version: v1
+#    name: serving-cert # this name should match the one in certificate.yaml
+#- name: SERVICE_NAMESPACE # namespace of the service
+#  objref:
+#    kind: Service
+#    version: v1
+#    name: webhook-service
+#  fieldref:
+#    fieldpath: metadata.namespace
+#- name: SERVICE_NAME
+#  objref:
+#    kind: Service
+#    version: v1
+#    name: webhook-service
diff --git a/src/core/operator/config/default/manager_auth_proxy_patch.yaml b/src/core/operator/config/default/manager_auth_proxy_patch.yaml
new file mode 100644
index 00000000..45be3188
--- /dev/null
+++ b/src/core/operator/config/default/manager_auth_proxy_patch.yaml
@@ -0,0 +1,34 @@
+# This patch injects a sidecar container which is an HTTP proxy for the
+# controller manager; it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
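+#
+# In practice (see the container args below), kube-rbac-proxy listens on
+# 0.0.0.0:8443 and forwards authorized requests to the manager's metrics
+# endpoint bound to 127.0.0.1:8080, so the metrics endpoint is only reachable
+# through the proxy from outside the pod.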
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: kube-rbac-proxy + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.8.0 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=0" + ports: + - containerPort: 8443 + protocol: TCP + name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + - name: manager + args: + - "--health-probe-bind-address=:8081" + - "--metrics-bind-address=127.0.0.1:8080" + - "--leader-elect" diff --git a/src/core/operator/config/default/manager_config_patch.yaml b/src/core/operator/config/default/manager_config_patch.yaml new file mode 100644 index 00000000..6c400155 --- /dev/null +++ b/src/core/operator/config/default/manager_config_patch.yaml @@ -0,0 +1,20 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager + args: + - "--config=controller_manager_config.yaml" + volumeMounts: + - name: manager-config + mountPath: /controller_manager_config.yaml + subPath: controller_manager_config.yaml + volumes: + - name: manager-config + configMap: + name: manager-config diff --git a/src/core/operator/config/manager/controller_manager_config.yaml b/src/core/operator/config/manager/controller_manager_config.yaml new file mode 100644 index 00000000..c9238b44 --- /dev/null +++ b/src/core/operator/config/manager/controller_manager_config.yaml @@ -0,0 +1,11 @@ +apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 +kind: ControllerManagerConfig +health: + healthProbeBindAddress: :8081 +metrics: + bindAddress: 127.0.0.1:8080 +webhook: + port: 9443 +leaderElection: + leaderElect: true + resourceName: 5c393c71.datacoves.com diff --git a/src/core/operator/config/manager/kustomization.yaml b/src/core/operator/config/manager/kustomization.yaml new file mode 100644 index 00000000..2bcd3eea --- /dev/null +++ b/src/core/operator/config/manager/kustomization.yaml @@ -0,0 +1,10 @@ +resources: +- manager.yaml + +generatorOptions: + disableNameSuffixHash: true + +configMapGenerator: +- name: manager-config + files: + - controller_manager_config.yaml diff --git a/src/core/operator/config/manager/manager.yaml b/src/core/operator/config/manager/manager.yaml new file mode 100644 index 00000000..dd874564 --- /dev/null +++ b/src/core/operator/config/manager/manager.yaml @@ -0,0 +1,60 @@ +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + securityContext: + runAsNonRoot: true + containers: + - command: + - /manager + args: + - --leader-elect + image: controller:latest + name: manager + securityContext: + allowPrivilegeEscalation: false + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. 
+ # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 3Gi + requests: + cpu: 100m + memory: 1Gi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 20 diff --git a/src/core/operator/config/prometheus/kustomization.yaml b/src/core/operator/config/prometheus/kustomization.yaml new file mode 100644 index 00000000..ed137168 --- /dev/null +++ b/src/core/operator/config/prometheus/kustomization.yaml @@ -0,0 +1,2 @@ +resources: +- monitor.yaml diff --git a/src/core/operator/config/prometheus/monitor.yaml b/src/core/operator/config/prometheus/monitor.yaml new file mode 100644 index 00000000..d19136ae --- /dev/null +++ b/src/core/operator/config/prometheus/monitor.yaml @@ -0,0 +1,20 @@ + +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/src/core/operator/config/rbac/account_editor_role.yaml b/src/core/operator/config/rbac/account_editor_role.yaml new file mode 100644 index 00000000..d647f336 --- /dev/null +++ b/src/core/operator/config/rbac/account_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit accounts. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: account-editor-role +rules: +- apiGroups: + - datacoves.com + resources: + - accounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - datacoves.com + resources: + - accounts/status + verbs: + - get diff --git a/src/core/operator/config/rbac/account_viewer_role.yaml b/src/core/operator/config/rbac/account_viewer_role.yaml new file mode 100644 index 00000000..8f100ac5 --- /dev/null +++ b/src/core/operator/config/rbac/account_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view accounts. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: account-viewer-role +rules: +- apiGroups: + - datacoves.com + resources: + - accounts + verbs: + - get + - list + - watch +- apiGroups: + - datacoves.com + resources: + - accounts/status + verbs: + - get diff --git a/src/core/operator/config/rbac/auth_proxy_client_clusterrole.yaml b/src/core/operator/config/rbac/auth_proxy_client_clusterrole.yaml new file mode 100644 index 00000000..51a75db4 --- /dev/null +++ b/src/core/operator/config/rbac/auth_proxy_client_clusterrole.yaml @@ -0,0 +1,9 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/src/core/operator/config/rbac/auth_proxy_role.yaml b/src/core/operator/config/rbac/auth_proxy_role.yaml new file mode 100644 index 00000000..80e1857c --- /dev/null +++ b/src/core/operator/config/rbac/auth_proxy_role.yaml @@ -0,0 +1,17 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/src/core/operator/config/rbac/auth_proxy_role_binding.yaml b/src/core/operator/config/rbac/auth_proxy_role_binding.yaml new file mode 100644 index 00000000..ec7acc0a --- /dev/null +++ b/src/core/operator/config/rbac/auth_proxy_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/src/core/operator/config/rbac/auth_proxy_service.yaml b/src/core/operator/config/rbac/auth_proxy_service.yaml new file mode 100644 index 00000000..71f17972 --- /dev/null +++ b/src/core/operator/config/rbac/auth_proxy_service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + control-plane: controller-manager diff --git a/src/core/operator/config/rbac/helmrelease_editor_role.yaml b/src/core/operator/config/rbac/helmrelease_editor_role.yaml new file mode 100644 index 00000000..c075d768 --- /dev/null +++ b/src/core/operator/config/rbac/helmrelease_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit helmreleases. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: helmrelease-editor-role +rules: +- apiGroups: + - datacoves.com + resources: + - helmreleases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - datacoves.com + resources: + - helmreleases/status + verbs: + - get diff --git a/src/core/operator/config/rbac/helmrelease_viewer_role.yaml b/src/core/operator/config/rbac/helmrelease_viewer_role.yaml new file mode 100644 index 00000000..86ab5926 --- /dev/null +++ b/src/core/operator/config/rbac/helmrelease_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view helmreleases. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: helmrelease-viewer-role +rules: +- apiGroups: + - datacoves.com + resources: + - helmreleases + verbs: + - get + - list + - watch +- apiGroups: + - datacoves.com + resources: + - helmreleases/status + verbs: + - get diff --git a/src/core/operator/config/rbac/kustomization.yaml b/src/core/operator/config/rbac/kustomization.yaml new file mode 100644 index 00000000..731832a6 --- /dev/null +++ b/src/core/operator/config/rbac/kustomization.yaml @@ -0,0 +1,18 @@ +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 4 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml +- auth_proxy_client_clusterrole.yaml diff --git a/src/core/operator/config/rbac/leader_election_role.yaml b/src/core/operator/config/rbac/leader_election_role.yaml new file mode 100644 index 00000000..4190ec80 --- /dev/null +++ b/src/core/operator/config/rbac/leader_election_role.yaml @@ -0,0 +1,37 @@ +# permissions to do leader election. +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/src/core/operator/config/rbac/leader_election_role_binding.yaml b/src/core/operator/config/rbac/leader_election_role_binding.yaml new file mode 100644 index 00000000..1d1321ed --- /dev/null +++ b/src/core/operator/config/rbac/leader_election_role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/src/core/operator/config/rbac/role.yaml b/src/core/operator/config/rbac/role.yaml new file mode 100644 index 00000000..7f12a88f --- /dev/null +++ b/src/core/operator/config/rbac/role.yaml @@ -0,0 +1,214 @@ +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: manager-role +rules: +- apiGroups: + - "" + resources: + - configmaps + - events + - namespaces + - persistentvolumeclaims + - pods + - secrets + - serviceaccounts + - services + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - '*' + resources: + - cronjobs + - jobs + - pods + - pods/attach + - pods/exec + - pods/log + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - daemonsets + - deployments + - replicasets + - statefulsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - autoscaling 
+ resources: + - horizontalpodautoscalers + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - datacoves.com + resources: + - accounts + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - datacoves.com + resources: + - accounts/finalizers + verbs: + - update +- apiGroups: + - datacoves.com + resources: + - accounts/status + verbs: + - get + - patch + - update +- apiGroups: + - datacoves.com + resources: + - helmreleases + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - datacoves.com + resources: + - helmreleases/finalizers + verbs: + - update +- apiGroups: + - datacoves.com + resources: + - helmreleases/status + verbs: + - get + - patch + - update +- apiGroups: + - datacoves.com + resources: + - users + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - datacoves.com + resources: + - users/finalizers + verbs: + - update +- apiGroups: + - datacoves.com + resources: + - users/status + verbs: + - get + - patch + - update +- apiGroups: + - datacoves.com + resources: + - workspaces + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - datacoves.com + resources: + - workspaces/finalizers + verbs: + - update +- apiGroups: + - datacoves.com + resources: + - workspaces/status + verbs: + - get + - patch + - update +- apiGroups: + - networking.k8s.io + resources: + - ingresses + - networkpolicies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - policy + resources: + - poddisruptionbudgets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - rbac.authorization.k8s.io + resources: + - clusterrolebindings + - clusterroles + - rolebindings + - roles + verbs: + - create + - delete + - get + - list + - patch + - update + - watch diff --git a/src/core/operator/config/rbac/role_binding.yaml b/src/core/operator/config/rbac/role_binding.yaml new file mode 100644 index 00000000..2070ede4 --- /dev/null +++ b/src/core/operator/config/rbac/role_binding.yaml @@ -0,0 +1,12 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/src/core/operator/config/rbac/service_account.yaml b/src/core/operator/config/rbac/service_account.yaml new file mode 100644 index 00000000..7cd6025b --- /dev/null +++ b/src/core/operator/config/rbac/service_account.yaml @@ -0,0 +1,5 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: controller-manager + namespace: system diff --git a/src/core/operator/config/rbac/user_editor_role.yaml b/src/core/operator/config/rbac/user_editor_role.yaml new file mode 100644 index 00000000..97a24438 --- /dev/null +++ b/src/core/operator/config/rbac/user_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit users. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: user-editor-role +rules: +- apiGroups: + - datacoves.com + resources: + - users + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - datacoves.com + resources: + - users/status + verbs: + - get diff --git a/src/core/operator/config/rbac/user_viewer_role.yaml b/src/core/operator/config/rbac/user_viewer_role.yaml new file mode 100644 index 00000000..640af11e --- /dev/null +++ b/src/core/operator/config/rbac/user_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view users. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: user-viewer-role +rules: +- apiGroups: + - datacoves.com + resources: + - users + verbs: + - get + - list + - watch +- apiGroups: + - datacoves.com + resources: + - users/status + verbs: + - get diff --git a/src/core/operator/config/rbac/workspace_editor_role.yaml b/src/core/operator/config/rbac/workspace_editor_role.yaml new file mode 100644 index 00000000..8ccba0ed --- /dev/null +++ b/src/core/operator/config/rbac/workspace_editor_role.yaml @@ -0,0 +1,24 @@ +# permissions for end users to edit workspaces. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: workspace-editor-role +rules: +- apiGroups: + - datacoves.com + resources: + - workspaces + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - datacoves.com + resources: + - workspaces/status + verbs: + - get diff --git a/src/core/operator/config/rbac/workspace_viewer_role.yaml b/src/core/operator/config/rbac/workspace_viewer_role.yaml new file mode 100644 index 00000000..eec8b05e --- /dev/null +++ b/src/core/operator/config/rbac/workspace_viewer_role.yaml @@ -0,0 +1,20 @@ +# permissions for end users to view workspaces. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: workspace-viewer-role +rules: +- apiGroups: + - datacoves.com + resources: + - workspaces + verbs: + - get + - list + - watch +- apiGroups: + - datacoves.com + resources: + - workspaces/status + verbs: + - get diff --git a/src/core/operator/controllers/account_controller.go b/src/core/operator/controllers/account_controller.go new file mode 100644 index 00000000..31dfdd80 --- /dev/null +++ b/src/core/operator/controllers/account_controller.go @@ -0,0 +1,116 @@ +package controllers + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + + . "datacoves.com/operator/api/v1" +) + +// AccountReconciler reconciles a Account object +type AccountReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// SetupWithManager sets up the controller with the Manager. +func (r *AccountReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&Account{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Watches( + &source.Kind{Type: &Workspace{}}, + handler.EnqueueRequestsFromMapFunc(r.enqueueForWatched), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). 
+ // As long as we have 1 workspace this parameter improves nothing, so + // let's not risk it for now. + // WithOptions(controller.Options{MaxConcurrentReconciles: 12}). + Complete(r) +} + +// When a watched obj changes, trigger a reconcile if the obj has a workspace annotation. +func (r *AccountReconciler) enqueueForWatched(obj client.Object) []reconcile.Request { + kind := obj.GetObjectKind().GroupVersionKind() + if kind.Group == GroupVersion.Group && kind.Version == GroupVersion.Version && kind.Kind == "Workspace" { + workspaceNs := obj.GetNamespace() + workspaceName := obj.GetName() + workspace := Workspace{} + err := r.Get(context.TODO(), client.ObjectKey{Namespace: workspaceNs, Name: workspaceName}, &workspace) + if err != nil { + // If we can't get a workspace with that name, don't reconcile. + return []reconcile.Request{} + } + + accountName := workspace.Spec.Account + accountNs := "dca-" + accountName + account := Account{} + err = r.Get(context.TODO(), client.ObjectKey{Namespace: accountNs, Name: accountName}, &account) + if err != nil { + // If we can't get an account with that name, don't reconcile. + return []reconcile.Request{} + } + + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: account.Name, + Namespace: account.Namespace, + }, + }, + } + } + return []reconcile.Request{} +} + +//+kubebuilder:rbac:groups=datacoves.com,resources=accounts,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=datacoves.com,resources=accounts/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=datacoves.com,resources=accounts/finalizers,verbs=update + +func (r *AccountReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { + got := Account{} + account := &got + err = r.Get(ctx, req.NamespacedName, account) + if err != nil { + if errors.IsNotFound(err) { + // The account no longer exists. There is nothing to do and nothing has + // failed so we must return without an error (or we would be retried). + err = nil + } + return + } + if !account.DeletionTimestamp.IsZero() { + // Do nothing if the account is being deleted. + return + } + + // Add the account name to every log call from this reconcile. + logger := log.FromContext(ctx).WithName(account.Name) + ctx = log.IntoContext(ctx, logger) + log := log.FromContext(ctx) + log.Info("reconciling", "generation", account.Generation) + defer func() { + if err == nil { + log.Info("reconciled", "generation", account.Generation) + } + }() + + err = addImagePullSecretToDefaultServiceAccount(ctx, r.Client, account.Namespace, account.Spec.ImagePullSecret) + if err != nil { + log.Error(err, "error in stage: imagePullSecret") + return + } + + return ctrl.Result{}, nil +} diff --git a/src/core/operator/controllers/helmrelease_controller.go b/src/core/operator/controllers/helmrelease_controller.go new file mode 100644 index 00000000..5a922967 --- /dev/null +++ b/src/core/operator/controllers/helmrelease_controller.go @@ -0,0 +1,109 @@ +package controllers + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + + . 
"datacoves.com/operator/api/v1" + "datacoves.com/operator/helm" +) + +const ( + helmReleaseFinalizer = "datacoves.com/helmrelease-finalizer" +) + +// HelmReleaseReconciler reconciles a HelmRelease object +type HelmReleaseReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// SetupWithManager sets up the controller with the Manager. +func (r *HelmReleaseReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&HelmRelease{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + WithOptions(controller.Options{MaxConcurrentReconciles: 6}). + Complete(r) +} + +//+kubebuilder:rbac:groups=datacoves.com,resources=helmreleases,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=datacoves.com,resources=helmreleases/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=datacoves.com,resources=helmreleases/finalizers,verbs=update + +func (r *HelmReleaseReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { + got := HelmRelease{} + release := &got + err = r.Get(ctx, req.NamespacedName, release) + if err != nil { + if errors.IsNotFound(err) { + // The release no longer exists. There is nothing to do and nothing has + // failed so we must return without an error (or we would be retried). + err = nil + } + return + } + + // Add the release name to every log call from this reconcile. + logger := log.FromContext(ctx).WithName(release.Name) + ctx = log.IntoContext(ctx, logger) + log := log.FromContext(ctx) + log.Info("reconciling", "generation", release.Generation) + defer func() { + if err == nil { + log.Info("reconciled", "generation", release.Generation) + } + }() + + // Uninstall on HelmResource deletion using a finalizer. + if release.ObjectMeta.DeletionTimestamp.IsZero() { + if !ctrlu.ContainsFinalizer(release, helmReleaseFinalizer) { + ctrlu.AddFinalizer(release, helmReleaseFinalizer) + err = r.Update(ctx, release) + if err != nil { + return + } + } + } else { + if ctrlu.ContainsFinalizer(release, helmReleaseFinalizer) { + helm.Uninstall(releaseChart(release)) + ctrlu.RemoveFinalizer(release, helmReleaseFinalizer) + err = r.Update(ctx, release) + if err != nil { + return + } + } + + err = nil + return + } + + helm.Install( + releaseChart(release), + helm.InstallArgs{ + RepoURL: release.Spec.RepoURL, + RepoName: release.Spec.RepoName, + Version: release.Spec.Version, + ValuesName: release.Spec.ValuesName, + }) + + err = nil + return +} + +func releaseChart(release *HelmRelease) helm.Chart { + return helm.Chart{ + Namespace: release.Namespace, + Name: release.Spec.Chart, + Release: release.Name, + } +} diff --git a/src/core/operator/controllers/reconcilers.go b/src/core/operator/controllers/reconcilers.go new file mode 100644 index 00000000..5e80e6b2 --- /dev/null +++ b/src/core/operator/controllers/reconcilers.go @@ -0,0 +1,620 @@ +package controllers + +import ( + "context" + "fmt" + + "github.com/go-logr/logr" + + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "k8s.io/apimachinery/pkg/api/equality" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + apps "k8s.io/api/apps/v1" + autoscaling "k8s.io/api/autoscaling/v2" + core "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1" + policy "k8s.io/api/policy/v1beta1" + rbac "k8s.io/api/rbac/v1" +) + +// Helpers to reconcile 
resources. Used controllerutils.CreateOrUpdate for +// reference, but these are saner because they take in the desired state of the +// object instead of a mutate function to bang it into shape. + +func logCreate(log logr.Logger, kind, name string) { + log.Info("create "+kind, "name", name) +} + +func logUpdate(log logr.Logger, kind, name, reason string) { + log.Info("update "+kind, "name", name, "reason", reason) +} + +func logDelete(log logr.Logger, kind, name string) { + log.Info("delete "+kind, "name", name) +} + +func reconcileDeployment(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *apps.Deployment) error { + // Set the owner of the deployment, so that it is deleted when the owner is deleted. + err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := apps.Deployment{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "Deployment", obj.Name) + return c.Create(ctx, obj) + } + if err == nil { + reason := reasonToUpdateDeployment(log, &got.Spec, &obj.Spec) + if reason != "" { + logUpdate(log, "Deployment", obj.Name, reason) + return c.Update(ctx, obj) + } + } + return err +} + +func reconcileStatefulSet(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *apps.StatefulSet) error { + // Set the owner of the statefulset, so that it is deleted when the owner is deleted. + err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := apps.StatefulSet{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "Statefulset", obj.Name) + return c.Create(ctx, obj) + } + if err == nil { + reason := reasonToUpdateStatefulSet(log, &got.Spec, &obj.Spec) + if reason != "" { + logUpdate(log, "Statefulset", obj.Name, reason) + return c.Update(ctx, obj) + } + } + return err +} + +func reconcileNetworkPolicy(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *networking.NetworkPolicy) error { + // Set the owner of the network policy, so that it is deleted when the owner is deleted. + err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := networking.NetworkPolicy{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "NetworkPolicy", obj.Name) + return c.Create(ctx, obj) + } + return c.Update(ctx, obj) +} + +func reconcileHPA(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *autoscaling.HorizontalPodAutoscaler) error { + // Set the owner of the HPA, so that it is deleted when the owner is deleted. 
+ err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := autoscaling.HorizontalPodAutoscaler{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "HorizontalPodAutoscaler", obj.Name) + return c.Create(ctx, obj) + } + return c.Update(ctx, obj) +} + +// NOTE: If this function is returning a reason to update when nothing has +// really changed, it is most likely because the objects are different in a +// trivial way, often because obj has zero values for some field (relying on the +// API setting some defaults) and what we got has the field set. +// So, the equality comparison we want depends on what the libraries do. +// No good answer with this approach... We'll have to keep refining our +// equality comparisons until it behaves like we need it to. +// Another stopgap measure is to set defaults explicitly when creating objects. +func reasonToUpdateDeployment(log logr.Logger, got, obj *apps.DeploymentSpec) string { + // These are just forks of golang's reflect.DeepEqual with special handling + // for a few k8s api types like resource.Quantity. The only semantic thing + // about them is the name... Better than nothing, I guess... + eq := equality.Semantic.DeepEqual + eqd := equality.Semantic.DeepDerivative + + switch { + case !eq(got.Replicas, obj.Replicas): + return "replicas" + case !eqd(got.Selector, obj.Selector): + return "selector" + } + + return reasonToUpdatePodTemplate(log, &got.Template.Spec, &obj.Template.Spec) +} + +func reasonToUpdateStatefulSet(log logr.Logger, got, obj *apps.StatefulSetSpec) string { + // These are just forks of golang's reflect.DeepEqual with special handling + // for a few k8s api types like resource.Quantity. The only semantic thing + // about them is the name... Better than nothing, I guess... + eq := equality.Semantic.DeepEqual + eqd := equality.Semantic.DeepDerivative + + switch { + case !eq(got.Replicas, obj.Replicas): + return "replicas" + case !eqd(got.Selector, obj.Selector): + return "selector" + } + + return reasonToUpdatePodTemplate(log, &got.Template.Spec, &obj.Template.Spec) +} + +func reasonToUpdatePodTemplate(log logr.Logger, got, obj *core.PodSpec) string { + eq := equality.Semantic.DeepEqual + eqd := equality.Semantic.DeepDerivative + + switch { + case !eq(got.NodeSelector, obj.NodeSelector): + return "template.spec.nodeSelector" + case !eqd(got.Volumes, obj.Volumes): + return "template.spec.volumes" + } + + if len(obj.Containers) != len(got.Containers) { + return "template.spec.containers" + } + + for i, objc := range obj.Containers { + gotc := got.Containers[i] + + // If the spec containers order is maintained, this won't be needed, but + // just in case, we check if the names match, and if they don't we look + // for a container with a matching name. If we haven't found one, the + // specs differ and the deployment should be updated.
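The eq / eqd helpers used throughout these reasonToUpdate* functions come from apimachinery's equality.Semantic; the practical difference is that DeepDerivative treats fields left at their zero value in its first argument as "don't care", which is what keeps API-server defaulting from triggering spurious updates. A minimal standalone sketch of that behaviour (the PodSpec values are made up for illustration; the printed results reflect the documented semantics):

```go
package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/equality"
)

func main() {
	// "partial" plays the role of a desired spec with most fields unset;
	// "full" plays the role of the live object after API-server defaulting.
	partial := core.PodSpec{
		Containers: []core.Container{{Name: "web", Image: "nginx:1.25"}},
	}
	full := core.PodSpec{
		RestartPolicy: core.RestartPolicyAlways, // filled in by defaulting
		Containers: []core.Container{{
			Name:                     "web",
			Image:                    "nginx:1.25",
			TerminationMessagePolicy: core.TerminationMessageReadFile, // defaulted
		}},
	}

	// DeepEqual sees the defaulted fields and reports a difference.
	fmt.Println(equality.Semantic.DeepEqual(partial, full)) // false
	// DeepDerivative ignores zero-valued fields in the first argument.
	fmt.Println(equality.Semantic.DeepDerivative(partial, full)) // true
}
```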
+ if gotc.Name != objc.Name { + found := false + for _, c := range obj.Containers { + if gotc.Name == objc.Name { + found = true + gotc = c + break + } + } + if !found { + return "container.name" + } + } + + switch { + case gotc.Name != objc.Name: + return "container.name" + case gotc.Image != objc.Image: + return "container.image" + case gotc.ImagePullPolicy != objc.ImagePullPolicy: + return "container.imagePullPolicy" + case gotc.WorkingDir != objc.WorkingDir: + return "container.workingDir" + case gotc.Stdin != objc.Stdin: + return "container.stdin" + case gotc.TTY != objc.TTY: + return "container.tty" + + case !eq(gotc.Env, objc.Env): + return "container.env" + case !eqd(gotc.EnvFrom, objc.EnvFrom): + return "container.envFrom" + case !eqd(gotc.Ports, objc.Ports): + return "containter.ports" + case !eqd(gotc.VolumeMounts, objc.VolumeMounts): + return "container.volumeMounts" + case !eq(gotc.Resources, objc.Resources): + return "container.resources" + // We monitor liveness/readiness probes changes only on initialdelayseconds property since it's changed + // dynamically on pomerium. We SHOULD NOT monitor other properties of liveness/readiness probes + // since we don't set explicit values to all of them and k8s sets defaults (changes them) + case gotc.LivenessProbe != nil && objc.LivenessProbe != nil && gotc.LivenessProbe.InitialDelaySeconds != objc.LivenessProbe.InitialDelaySeconds: + return "container.LivenessProbe.InitialDelaySeconds" + case gotc.ReadinessProbe != nil && objc.ReadinessProbe != nil && gotc.ReadinessProbe.InitialDelaySeconds != objc.ReadinessProbe.InitialDelaySeconds: + return "container.ReadinessProbe.InitialDelaySeconds" + } + } + + return "" +} + +func reconcileDaemonSet(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *apps.DaemonSet) error { + // Set the owner of the deployment, so that it is deleted when the owner is deleted. + err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := apps.DaemonSet{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "DaemonSet", obj.Name) + return c.Create(ctx, obj) + } + if err == nil { + reason := reasonToUpdateDaemonSet(log, &got.Spec, &obj.Spec) + if reason != "" { + logUpdate(log, "DaemonSet", obj.Name, reason) + return c.Update(ctx, obj) + } + } + return err +} + +func reasonToUpdateDaemonSet(log logr.Logger, got, obj *apps.DaemonSetSpec) string { + // These are just forks of golang's reflect.DeepEqual with special handling + // for a few k8s api types like resource.Quantity. The only semantic thing + // about them is the name... Better than nothing, I guess... + // eq := equality.Semantic.DeepEqual + eqd := equality.Semantic.DeepDerivative + + switch { + case !eqd(got.Selector, obj.Selector): + return "selector" + } + + return reasonToUpdatePodTemplate(log, &got.Template.Spec, &obj.Template.Spec) +} + +func reconcileSecret(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *core.Secret) error { + err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := core.Secret{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "Secret", obj.Name) + return c.Create(ctx, obj) + } + + // NOTE: No updates. We treat Secrets as immutable. 
+ + return err +} + +func reconcileConfigMap(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *core.ConfigMap) error { + err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := core.ConfigMap{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "ConfigMap", obj.Name) + return c.Create(ctx, obj) + } + + // NOTE: No updates. We treat ConfigMaps as immutable. + + return err +} + +func reconcileService(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *core.Service) error { + err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := core.Service{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "Service", obj.Name) + return c.Create(ctx, obj) + } + if err == nil { + reason := reasonToUpdateService(log, &got.Spec, &obj.Spec) + if reason != "" { + // Updates are tricky. Instead we delete the service and return an + // error so reconciliation runs again and recreates it. + err := c.Delete(ctx, &got) + if err != nil { + return err + } + return fmt.Errorf("Service %s spec mismatch. Deleting and recreating", obj.Name) + } + } + return err +} + +func reasonToUpdateService(log logr.Logger, got, obj *core.ServiceSpec) string { + eqd := equality.Semantic.DeepDerivative + + switch { + case !eqd(got.Ports, obj.Ports): + if len(got.Ports) == len(obj.Ports) { + // Set equal the fields that we want to ignore and compare again. + for i, port := range obj.Ports { + got.Ports[i].NodePort = port.NodePort + } + if eqd(got.Ports, obj.Ports) { + return "" + } + } + return "ports" + case !eqd(got.Selector, obj.Selector): + return "selector" + } + return "" +} + +func reconcilePersistentVolumeClaim(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *core.PersistentVolumeClaim) error { + // Let's not set the owner ref for now, in case we want the PVCs to outlive + // the workspace. + // err := ctrlu.SetControllerReference(owner, obj, scheme) + // if err != nil { + // return err + // } + + log := log.FromContext(ctx) + + got := core.PersistentVolumeClaim{} + err := c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "PersistentVolumeClaim", obj.Name) + return c.Create(ctx, obj) + } + + return err +} + +func reconcileServiceAccount(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *core.ServiceAccount) error { + // Set the owner of the ServiceAccount, so that it is deleted when the owner is deleted. 
+ err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := core.ServiceAccount{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "ServiceAccount", obj.Name) + return c.Create(ctx, obj) + } + if err == nil { + eq := equality.Semantic.DeepEqual + reason := "" + if !eq(got.ImagePullSecrets, obj.ImagePullSecrets) { + reason = "imagePullSecrets" + } + if !eq(got.AutomountServiceAccountToken, obj.AutomountServiceAccountToken) { + reason += " automountServiceAccountToken" + } + if reason != "" { + logUpdate(log, "ServiceAccount", obj.Name, reason) + return c.Update(ctx, obj) + } + } + return err +} + +func reconcileRole(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *rbac.Role) error { + // Set the owner of the Role, so that it is deleted when the owner is deleted. + err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := rbac.Role{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "Role", obj.Name) + return c.Create(ctx, obj) + } + if err == nil { + if !rulesEqual(got.Rules, obj.Rules) { + logUpdate(log, "Role", obj.Name, "rules") + return c.Update(ctx, obj) + } + } + return err +} + +func reconcileRoleBinding(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *rbac.RoleBinding) error { + // Set the owner of the RoleBinding, so that it is deleted when the owner is deleted. + err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := rbac.RoleBinding{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "RoleBinding", obj.Name) + return c.Create(ctx, obj) + } + if err == nil { + eqd := equality.Semantic.DeepDerivative + reason := "" + if !eqd(got.RoleRef, obj.RoleRef) { + reason = "roleRef" + } + if !eqd(got.Subjects, obj.Subjects) { + reason += " subjects" + } + if reason != "" { + logUpdate(log, "RoleBinding", obj.Name, reason) + return c.Update(ctx, obj) + } + } + return err +} + +func reconcileClusterRole(ctx context.Context, c client.Client, scheme *runtime.Scheme, obj *rbac.ClusterRole) error { + log := log.FromContext(ctx) + + got := rbac.ClusterRole{} + err := c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "ClusterRole", obj.Name) + return c.Create(ctx, obj) + } + if err == nil { + eqd := equality.Semantic.DeepDerivative + if !eqd(got.Rules, obj.Rules) { + logUpdate(log, "ClusterRole", obj.Name, "rules") + return c.Update(ctx, obj) + } + } + return err +} + +func reconcileClusterRoleBinding(ctx context.Context, c client.Client, scheme *runtime.Scheme, obj *rbac.ClusterRoleBinding) error { + log := log.FromContext(ctx) + + got := rbac.ClusterRoleBinding{} + err := c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "ClusterRoleBinding", obj.Name) + return c.Create(ctx, obj) + } + if err == nil { + eqd := equality.Semantic.DeepDerivative + reason := "" + if !eqd(got.RoleRef, obj.RoleRef) { + reason = "roleRef" + } + if !eqd(got.Subjects, obj.Subjects) { + reason += " subjects" + } + if reason != "" { + logUpdate(log, "ClusterRoleBinding", obj.Name, reason) + return 
c.Update(ctx, obj) + } + } + return err +} + +func reconcilePodDisruptionPolicy(ctx context.Context, c client.Client, scheme *runtime.Scheme, owner v1.Object, obj *policy.PodDisruptionBudget) error { + // Set the owner of the PodDisruptionBudget, so that it is deleted when the owner is deleted. + err := ctrlu.SetControllerReference(owner, obj, scheme) + if err != nil { + return err + } + + log := log.FromContext(ctx) + + got := policy.PodDisruptionBudget{} + err = c.Get(ctx, client.ObjectKeyFromObject(obj), &got) + if err != nil && errors.IsNotFound(err) { + logCreate(log, "PodDisruptionBudget", obj.Name) + return c.Create(ctx, obj) + } + if err == nil { + eq := equality.Semantic.DeepEqual + reason := "" + if !eq(got.Spec.Selector, obj.Spec.Selector) { + reason = "selector" + } + if !eq(got.Spec.MinAvailable, obj.Spec.MinAvailable) { + reason += " minAvailable" + } + if reason != "" { + logUpdate(log, "PodDisruptionBudget", obj.Name, reason) + return c.Update(ctx, obj) + } + } + return err +} + +func deleteDeployments(ctx context.Context, c client.Client, namespace string, names ...string) error { + for _, name := range names { + err := deleteDeployment(ctx, c, namespace, name) + if err != nil { + return err + } + } + return nil +} + +func deleteDeployment(ctx context.Context, c client.Client, namespace string, name string) error { + got := apps.Deployment{} + err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &got) + if err != nil && errors.IsNotFound(err) { + return nil + } + if err == nil { + log := log.FromContext(ctx) + logDelete(log, "Deployment", name) + return c.Delete(ctx, &got) + } + return err +} + +func deleteServices(ctx context.Context, c client.Client, namespace string, names ...string) error { + for _, name := range names { + err := deleteService(ctx, c, namespace, name) + if err != nil { + return err + } + } + return nil +} + +func deleteService(ctx context.Context, c client.Client, namespace string, name string) error { + got := core.Service{} + err := c.Get(ctx, client.ObjectKey{Namespace: namespace, Name: name}, &got) + if err != nil && errors.IsNotFound(err) { + return nil + } + if err == nil { + log := log.FromContext(ctx) + logDelete(log, "Service", name) + return c.Delete(ctx, &got) + } + return err +} + +func rulesEqual(got []rbac.PolicyRule, obj []rbac.PolicyRule) bool { + if len(got) != len(obj) { + return false + } + + for i := range got { + if !equality.Semantic.DeepDerivative(got[i].Resources, obj[i].Resources) || + !equality.Semantic.DeepDerivative(got[i].Verbs, obj[i].Verbs) || + !equality.Semantic.DeepDerivative(got[i].APIGroups, obj[i].APIGroups) { + return false + } + } + + return true +} diff --git a/src/core/operator/controllers/suite_test.go b/src/core/operator/controllers/suite_test.go new file mode 100644 index 00000000..c8a8dd30 --- /dev/null +++ b/src/core/operator/controllers/suite_test.go @@ -0,0 +1,64 @@ +package controllers + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/envtest/printer" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + datacovescomv1 "datacoves.com/operator/api/v1" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). 
Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecsWithDefaultAndCustomReporters(t, + "Controller Suite", + []Reporter{printer.NewlineReporter{}}) +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + cfg, err := testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = datacovescomv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}, 60) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/src/core/operator/controllers/user_code_server.go b/src/core/operator/controllers/user_code_server.go new file mode 100644 index 00000000..001070d3 --- /dev/null +++ b/src/core/operator/controllers/user_code_server.go @@ -0,0 +1,499 @@ +package controllers + +import ( + "context" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" + + . "datacoves.com/operator/api/v1" + u "datacoves.com/operator/controllers/utils" +) + +const codeServerPort = 8443 +const localAirflowPort = 8080 + +func (r *UserReconciler) codeServer(ctx context.Context, workspace *Workspace, user *User) error { + if !workspace.ServiceEnabled("code-server") || !user.Spec.HasPermissionForService("code-server") { + err := deleteDeployment(ctx, r.Client, workspace.Namespace, "code-server-"+user.Spec.Slug) + return err + } + + err := reconcileServiceAccount(ctx, r.Client, r.Scheme, workspace, genServiceAccount(workspace, &user.Spec)) + if err != nil { + return err + } + + err = reconcileRole(ctx, r.Client, r.Scheme, workspace, genRole(workspace, &user.Spec)) + if err != nil { + return err + } + + err = reconcileRoleBinding(ctx, r.Client, r.Scheme, workspace, genRoleBinding(workspace, &user.Spec)) + if err != nil { + return err + } + + err = reconcilePersistentVolumeClaim(ctx, r.Client, r.Scheme, user, genCodeServerPersistentVolumeClaim(workspace, &user.Spec)) + if err != nil { + return err + } + return reconcileStatefulSet(ctx, r.Client, r.Scheme, user, genCodeServerStatefulSet(workspace, &user.Spec)) +} + +func codeServerName(user *UserSpec) string { + return "code-server-" + user.Slug +} + +func codeServerConfigVolumeName(user *UserSpec) string { + return codeServerName(user) + "-config-volume" +} + +func codeServerSecretsVolumeName(user *UserSpec) string { + return codeServerName(user) + "-secrets-volume" +} + +func genCodeServerStatefulSet(workspace *Workspace, user *UserSpec) *apps.StatefulSet { + name := codeServerName(user) + configVolume := codeServerConfigVolumeName(user) + secretsVolume := codeServerSecretsVolumeName(user) + + labels := map[string]string{ + "app": name, + "role": "code-server", + } + + meta := v1.ObjectMeta{ + Name: name, + Namespace: workspace.Namespace, + Labels: labels, + Annotations: 
map[string]string{ + "kubectl.kubernetes.io/restartedAt": user.CodeServerRestartedAt, + "cluster-autoscaler.kubernetes.io/safe-to-evict": "false", + }, + } + + codeServerPorts := []core.ContainerPort{ + {ContainerPort: codeServerPort, Protocol: core.ProtocolTCP, Name: "http"}, + } + + for exposureKey, exposureOptions := range user.CodeServerExposures { + iport, err := strconv.Atoi(exposureOptions["port"]) + if err == nil { + codeServerPorts = append(codeServerPorts, core.ContainerPort{ContainerPort: int32(iport), Protocol: core.ProtocolTCP, Name: exposureKey}) + } + } + + codeServerContainer := core.Container{ + Name: "code-server", + Image: workspace.ProfileImageName("datacovesprivate/code-server-code-server"), + ImagePullPolicy: core.PullIfNotPresent, + Ports: codeServerPorts, + LivenessProbe: &core.Probe{ + ProbeHandler: core.ProbeHandler{ + HTTPGet: &core.HTTPGetAction{ + Path: "/", + Port: intstr.IntOrString{ + Type: intstr.Type(1), + IntVal: 0, + StrVal: "http", + }, + }, + }, + InitialDelaySeconds: 120, // we needed to increment it since orrum takes > 1 min sometimes + TimeoutSeconds: 1, + PeriodSeconds: 10, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + ReadinessProbe: &core.Probe{ + ProbeHandler: core.ProbeHandler{ + HTTPGet: &core.HTTPGetAction{ + Path: "/", + Port: intstr.IntOrString{ + Type: intstr.Type(1), + IntVal: 0, + StrVal: "http", + }, + }, + }, + InitialDelaySeconds: 15, + TimeoutSeconds: 1, + PeriodSeconds: 5, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + EnvFrom: []core.EnvFromSource{ + {SecretRef: &core.SecretEnvSource{ + LocalObjectReference: core.LocalObjectReference{ + Name: user.SecretName, + }, + }}, + }, + + VolumeMounts: []core.VolumeMount{ + { + Name: configVolume, + MountPath: "/config", + }, + { + Name: secretsVolume, + MountPath: "/opt/datacoves/user", + ReadOnly: true, + }, + }, + // TODO: Deprecate DBT_HOME and CODE_HOME + Env: u.Env{}. + Set("PUID", "1000"). + Set("PGID", "1000"). + Set("TZ", "America/Los_Angeles"). + Set("DBT_HOME", "/config/workspace/"+workspace.Spec.DbtHome). + Set("DATACOVES__DBT_HOME", "/config/workspace/"+workspace.Spec.DbtHome). + Set("DATACOVES__USER_EMAIL", user.Email). + Set("DATACOVES__USER_FULLNAME", user.Name). + Set("DATACOVES__REPOSITORY_URL", workspace.Spec.SshGitRepo). + Set("DATACOVES__REPOSITORY_CLONE", workspace.Spec.CloneRepository). + Set("DATACOVES__USER_SLUG", user.Slug), + } + + dbtDocsContainer := core.Container{ + Name: "dbt-docs", + Image: workspace.ImageName("datacovesprivate/observe-local-dbt-docs"), + ImagePullPolicy: core.PullIfNotPresent, + Ports: []core.ContainerPort{{ContainerPort: 80, Protocol: core.ProtocolTCP}}, + Env: u.Env{}. + Set("DBT_HOME", "workspace/"+workspace.Spec.DbtHome). 
+ Set("DATACOVES__DBT_HOME", "/config/workspace/"+workspace.Spec.DbtHome), + VolumeMounts: []core.VolumeMount{ + { + Name: configVolume, + MountPath: "/usr/share/nginx/html/code-server", + ReadOnly: true, + }, + }, + } + + // Use dbt-osmosis image if dbt-core-interface was not found + img := workspace.ProfileImageName("datacovesprivate/code-server-dbt-core-interface") + if strings.HasSuffix(img, ":latest") { + img = workspace.ProfileImageName("datacovesprivate/code-server-dbt-osmosis") + } + + dbtSyncServerContainer := core.Container{ + Name: "dbt-core-interface", + Image: img, + ImagePullPolicy: core.PullIfNotPresent, + Ports: []core.ContainerPort{{ContainerPort: 8581, Protocol: core.ProtocolTCP}}, + EnvFrom: []core.EnvFromSource{ + {SecretRef: &core.SecretEnvSource{ + LocalObjectReference: core.LocalObjectReference{ + Name: user.SecretName, + }, + }}, + }, + Env: u.Env{}. + Set("DBT_HOME", "workspace/"+workspace.Spec.DbtHome). + Set("DATACOVES__DBT_HOME", "/config/workspace/"+workspace.Spec.DbtHome). + Set("CODE_HOME", "/config"). + Set("DATACOVES__CODE_HOME", "/config"). + Set("DATACOVES__USER_EMAIL", user.Email). + Set("DATACOVES__USER_FULLNAME", user.Name). + Set("DATACOVES__REPOSITORY_URL", workspace.Spec.SshGitRepo). + Set("DATACOVES__REPOSITORY_CLONE", workspace.Spec.CloneRepository). + Set("DATACOVES__USER_SLUG", user.Slug), + VolumeMounts: []core.VolumeMount{ + { + Name: configVolume, + MountPath: "/config", + }, + }, + } + + localAirflowEnv := u.Env{}. + Set("AIRFLOW__DATABASE__SQL_ALCHEMY_CONN", "sqlite:////opt/airflow/database/airflow.db"). + Set("AIRFLOW__WEBSERVER__ENABLE_PROXY_FIX", "True") + + // Copy over environment variables + for _, nameval := range user.LocalAirflowEnvironment { + localAirflowEnv = localAirflowEnv.Set(nameval.Name, nameval.Value) + } + + // Make a local airflow container if we're doing such things. 
+ localAirflowContainer := core.Container{ + Name: "local-airflow", + Image: workspace.ProfileImageName("datacovesprivate/airflow-airflow"), + ImagePullPolicy: core.PullIfNotPresent, + Ports: []core.ContainerPort{{ContainerPort: 8080, Protocol: core.ProtocolTCP}}, + EnvFrom: []core.EnvFromSource{ + {SecretRef: &core.SecretEnvSource{ + LocalObjectReference: core.LocalObjectReference{ + Name: user.SecretName, + }, + }}, + }, + LivenessProbe: &core.Probe{ + ProbeHandler: core.ProbeHandler{ + HTTPGet: &core.HTTPGetAction{ + Path: "/", + Port: intstr.FromInt(8080), + }, + }, + InitialDelaySeconds: 60, + TimeoutSeconds: 30, + PeriodSeconds: 5, + SuccessThreshold: 1, + FailureThreshold: 20, + }, + ReadinessProbe: &core.Probe{ + ProbeHandler: core.ProbeHandler{ + HTTPGet: &core.HTTPGetAction{ + Path: "/health", + Port: intstr.FromInt(8080), + }, + }, + InitialDelaySeconds: 15, + TimeoutSeconds: 30, + PeriodSeconds: 5, + SuccessThreshold: 1, + FailureThreshold: 20, + }, + Env: localAirflowEnv, + + VolumeMounts: []core.VolumeMount{ + { + Name: configVolume, + MountPath: "/opt/airflow/dags/repo", + SubPath: "workspace", + }, + { + Name: configVolume, + MountPath: "/opt/airflow/database", + SubPath: "local-airflow/db", + }, + { + Name: configVolume, + MountPath: "/opt/airflow/logs", + SubPath: "local-airflow/logs", + }, + { + Name: secretsVolume, + MountPath: "/opt/datacoves/user", + ReadOnly: true, + }, + }, + Command: []string{ + "/bin/bash", + }, + Args: []string{ + "-c", + "cp /opt/datacoves/user/webserver_config.py /opt/airflow && " + + "/entrypoint standalone", + }, + } + + if workspace.DontUseWsgi() { + dbtSyncServerContainer.Args = []string{ + "--no-uwsgi", + } + } + + if resReqs, ok := workspace.Spec.ResourceRequirements["code-server"]; ok { + codeServerContainer.Resources = resReqs + } + if resReqs, ok := workspace.Spec.ResourceRequirements["code-server-dbt-docs"]; ok { + dbtDocsContainer.Resources = resReqs + } + if resReqs, ok := workspace.Spec.ResourceRequirements["code-server-dbt-core-interface"]; ok { + dbtSyncServerContainer.Resources = resReqs + } + if resReqs, ok := workspace.Spec.ResourceRequirements["code-server-local-airflow"]; ok { + localAirflowContainer.Resources = resReqs + } + + containers := []core.Container{codeServerContainer} + + if workspace.LocalDbtDocs() { + containers = append(containers, dbtDocsContainer) + } + + if workspace.DbtSync() { + containers = append(containers, dbtSyncServerContainer) + } + + /* + * Note that the order of volumes matters -- the if statement + * immediately after this depends on the secrets being the second + * item in the volume array. + */ + volumes := []core.Volume{ + { + Name: configVolume, + VolumeSource: core.VolumeSource{ + PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ + ClaimName: configVolume, + }, + }, + }, + { + Name: secretsVolume, + VolumeSource: core.VolumeSource{ + Secret: &core.SecretVolumeSource{ + SecretName: user.SecretName, + Items: []core.KeyToPath{ + { + Key: "DATACOVES__SSL_KEYS_JSON", + Path: "ssl_keys.json", + }, + { + Key: "DATACOVES__SSH_KEYS_JSON", + Path: "ssh_keys.json", + }, + { + Key: "DATACOVES__PROFILE_FILES", + Path: "files.json", + }, + }, + }, + }, + }, + } + + /* + * We only get passed this key if local airflow is enabled + * on the django side. + * + * This relies on volumes[1] being the secrets. 
+ */
+	if user.IsLocalAirflowEnabled() {
+		containers = append(containers, localAirflowContainer)
+		volumes[1].VolumeSource.Secret.Items =
+			append(
+				volumes[1].VolumeSource.Secret.Items,
+				core.KeyToPath{
+					Key:  "DATACOVES__AIRFLOW_WEBSERVER_CONFIG",
+					Path: "webserver_config.py",
+				},
+			)
+	}
+
+	replicas := int32(1)
+	if !user.CodeServerEnabled() {
+		replicas = int32(0)
+	}
+
+	// Disabling service links prevents Kubernetes from injecting a bunch
+	// of extra environment variables we don't want.
+	enableServiceLinks := false
+
+	podSpec := core.PodSpec{
+		NodeSelector:       u.VolumedNodeSelector,
+		ServiceAccountName: "code-server-" + user.Slug + "-sa",
+		Containers:         containers,
+		Volumes:            volumes,
+		HostAliases: []core.HostAlias{
+			{
+				IP:        workspace.Spec.InternalIp,
+				Hostnames: []string{"api." + workspace.Spec.ClusterDomain},
+			},
+		},
+		// This prevents the extra environment variables from appearing
+		EnableServiceLinks: &enableServiceLinks,
+	}
+
+	workspace.AddDnsToPodSpecIfNeeded(&podSpec)
+
+	statefulSet := &apps.StatefulSet{
+		ObjectMeta: meta,
+		Spec: apps.StatefulSetSpec{
+			Selector: &v1.LabelSelector{MatchLabels: labels},
+			Replicas: &replicas,
+			Template: core.PodTemplateSpec{
+				ObjectMeta: meta,
+				Spec:       podSpec,
+			},
+			// Migrate Deployment to StatefulSets?
+			// https://stackoverflow.com/questions/52848176/re-attach-volume-claim-on-deployment-update
+			// Strategy: apps.DeploymentStrategy{
+			//	Type: "Recreate",
+			//},
+		},
+	}
+
+	return statefulSet
+}
+
+func genCodeServerPersistentVolumeClaim(workspace *Workspace, user *UserSpec) *core.PersistentVolumeClaim {
+	return &core.PersistentVolumeClaim{
+		ObjectMeta: v1.ObjectMeta{
+			Namespace: workspace.Namespace,
+			Name:      codeServerConfigVolumeName(user),
+		},
+		Spec: core.PersistentVolumeClaimSpec{
+			AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
+			Resources: core.ResourceRequirements{
+				Requests: core.ResourceList{
+					core.ResourceStorage: resource.MustParse("20Gi"),
+				},
+			},
+		},
+	}
+}
+
+func genServiceAccount(workspace *Workspace, user *UserSpec) *core.ServiceAccount {
+	return &core.ServiceAccount{
+		ObjectMeta: v1.ObjectMeta{
+			Namespace: workspace.Namespace,
+			Name:      "code-server-" + user.Slug + "-sa",
+		},
+		ImagePullSecrets: []core.LocalObjectReference{
+			{
+				Name: workspace.Spec.ImagePullSecret,
+			},
+		},
+	}
+}
+
+func genRole(workspace *Workspace, user *UserSpec) *rbac.Role {
+	return &rbac.Role{
+		ObjectMeta: v1.ObjectMeta{
+			Namespace: workspace.Namespace,
+			Name:      codeServerName(user) + "-pod-exec-role",
+		},
+		Rules: []rbac.PolicyRule{
+			{
+				APIGroups:     []string{""},
+				Resources:     []string{"pods", "pods/exec"},
+				ResourceNames: []string{codeServerName(user) + "-0"}, // Suffix for statefulSet
+				Verbs:         []string{"create", "get", "list"},
+			},
+		},
+	}
+}
+
+func genRoleBinding(workspace *Workspace, user *UserSpec) *rbac.RoleBinding {
+	return &rbac.RoleBinding{
+		ObjectMeta: v1.ObjectMeta{
+			Namespace: workspace.Namespace,
+			Name:      codeServerName(user) + "-pod-exec-rolebinding",
+		},
+		RoleRef: rbac.RoleRef{
+			APIGroup: "rbac.authorization.k8s.io",
+			Kind:     "Role",
+			Name:     codeServerName(user) + "-pod-exec-role",
+		},
+		Subjects: []rbac.Subject{
+			{
+				Kind:      "ServiceAccount",
+				Name:      codeServerName(user) + "-sa",
+				Namespace: workspace.Namespace,
+			},
+		},
+	}
+}
diff --git a/src/core/operator/controllers/user_controller.go b/src/core/operator/controllers/user_controller.go
new file mode 100644
index 00000000..ef54e918
--- /dev/null
+++ b/src/core/operator/controllers/user_controller.go
@@ -0,0
+1,103 @@ +package controllers + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + + // apps "k8s.io/api/apps/v1" + // core "k8s.io/api/core/v1" + // rbac "k8s.io/api/rbac/v1" + + . "datacoves.com/operator/api/v1" +) + +// UserReconciler reconciles a User object +type UserReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// SetupWithManager sets up the controller with the Manager. +func (r *UserReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&User{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + WithOptions(controller.Options{MaxConcurrentReconciles: 12}). + Complete(r) +} + +//+kubebuilder:rbac:groups=datacoves.com,resources=users,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=datacoves.com,resources=users/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=datacoves.com,resources=users/finalizers,verbs=update + +func (r *UserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) { + got := User{} + user := &got + err = r.Get(ctx, req.NamespacedName, user) + if err != nil { + if errors.IsNotFound(err) { + // The user no longer exists. There is nothing to do and nothing has + // failed so we must return without an error (or we would be retried). + err = nil + } + return + } + if !user.DeletionTimestamp.IsZero() { + // Do nothing if the user is being deleted. + return + } + + // Add the user name to every log call from this reconcile. + logger := log.FromContext(ctx).WithName(user.Name) + ctx = log.IntoContext(ctx, logger) + log := log.FromContext(ctx) + log.Info("reconciling", "generation", user.Generation) + defer func() { + if err == nil { + log.Info("reconciled", "generation", user.Generation) + } + }() + + workspaceName := "" + for _, ref := range user.GetOwnerReferences() { + if ref.Kind == "Workspace" { + workspaceName = ref.Name + break + } + } + + if workspaceName == "" { + err = nil // NOTE: err is nil for now, so we won't retry. + log.Error(err, "user without owner", "user name", user.Name) + return + } + + gotw := Workspace{} + workspace := &gotw + err = r.Get(ctx, client.ObjectKey{Namespace: user.Namespace, Name: workspaceName}, workspace) + if err != nil { + if errors.IsNotFound(err) { + // The workspace no longer exists. There is nothing to do and nothing has + // failed so we must return without an error (or we would be retried). + err = nil + } + return + } + + err = r.codeServer(ctx, workspace, user) + if err != nil { + log.Error(err, "error in stage: code-server") + return + } + + err = nil + return +} diff --git a/src/core/operator/controllers/utils/constvars.go b/src/core/operator/controllers/utils/constvars.go new file mode 100644 index 00000000..bcd64a95 --- /dev/null +++ b/src/core/operator/controllers/utils/constvars.go @@ -0,0 +1,41 @@ +package utils + +import ( + networking "k8s.io/api/networking/v1" +) + +// Variables for k8s contants... The k8s api structs often take pointers to +// strings or ints and we just want to specify a value. But &1 or &"Prefix" is +// not valid go, you can't take the address of a constant. 
So you end up making +// temporary variables to take the address of, and allocating unnecessarily. + +var ( + True bool = true + False bool = false +) + +var ( + Int32_1 int32 = 1 + Int32_2 int32 = 2 + Int32_3 int32 = 3 + + Int32_0o600 int32 = 0o600 + Int32_0o644 int32 = 0o644 + Int32_0o755 int32 = 0o755 + + Int64_0 int64 = 0 + Int64_10 int64 = 10 +) + +var ( + StrNginx string = "nginx" +) + +var ( + PathTypePrefix networking.PathType = networking.PathTypePrefix +) + +var ( + VolumedNodeSelector = map[string]string{"k8s.datacoves.com/nodegroup-kind": "volumed"} + GeneralNodeSelector = map[string]string{"k8s.datacoves.com/nodegroup-kind": "general"} +) diff --git a/src/core/operator/controllers/utils/env.go b/src/core/operator/controllers/utils/env.go new file mode 100644 index 00000000..1d9a5fe8 --- /dev/null +++ b/src/core/operator/controllers/utils/env.go @@ -0,0 +1,80 @@ +package utils + +import ( + core "k8s.io/api/core/v1" +) + +// []core.EnvVar wrapper type with helper methods to specify envs more succinctly. +type Env []core.EnvVar + +func (e Env) Set(name, value string) Env { + for i, v := range e { + if v.Name == name { + e[i] = core.EnvVar{Name: name, Value: value} + return e + } + } + return append(e, core.EnvVar{Name: name, Value: value}) +} + +func (e Env) AddFromConfigMap(configMapName string, vars ...string) Env { + for _, name := range vars { + e = append(e, core.EnvVar{ + Name: name, + ValueFrom: &core.EnvVarSource{ + ConfigMapKeyRef: &core.ConfigMapKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: configMapName}, + Key: name, + }, + }, + }) + } + return e +} + +func (e Env) AddFromSecret(secretName string, vars ...string) Env { + for _, name := range vars { + e = append(e, core.EnvVar{ + Name: name, + ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: secretName}, + Key: name, + }, + }, + }) + } + return e +} + +func (e Env) RenameFromConfigMap(configMapName string, vars [][2]string) Env { + for _, kv := range vars { + k, v := kv[0], kv[1] + e = append(e, core.EnvVar{ + Name: k, + ValueFrom: &core.EnvVarSource{ + ConfigMapKeyRef: &core.ConfigMapKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: configMapName}, + Key: v, + }, + }, + }) + } + return e +} + +func (e Env) RenameFromSecret(secretName string, vars [][2]string) Env { + for _, kv := range vars { + k, v := kv[0], kv[1] + e = append(e, core.EnvVar{ + Name: k, + ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: secretName}, + Key: v, + }, + }, + }) + } + return e +} diff --git a/src/core/operator/controllers/utils/hash.go b/src/core/operator/controllers/utils/hash.go new file mode 100644 index 00000000..d581814d --- /dev/null +++ b/src/core/operator/controllers/utils/hash.go @@ -0,0 +1,107 @@ +package utils + +import ( + "context" + "crypto/sha256" + "fmt" + "sort" + + "sigs.k8s.io/controller-runtime/pkg/client" + + core "k8s.io/api/core/v1" +) + +func HashForName(data []byte) string { + return fmt.Sprintf("%x", sha256.Sum256(data))[:10] +} + +func HashSecret(secret *core.Secret) string { + h := sha256.New() + + // Iterating over go hashmaps yields items in random order, which changes the + // sha. We need to sort them first. 
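+	// The ten character prefix of the digest returned below is appended to
+	// resource names (see GetSecretHashed and GetConfigMapHashed further down),
+	// so the hash has to be deterministic across reconciles.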
+ keys := []string{} + for k, _ := range secret.Data { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + h.Write([]byte(k)) + h.Write(secret.Data[k]) + } + + keys = []string{} + for k, _ := range secret.StringData { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + h.Write([]byte(k)) + h.Write([]byte(secret.StringData[k])) + } + + return fmt.Sprintf("%x", h.Sum(nil))[:10] +} + +func HashConfigMap(configMap *core.ConfigMap) string { + h := sha256.New() + + // Iterating over go hashmaps yields items in random order, which changes the + // sha. We need to sort them first. + keys := []string{} + for k, _ := range configMap.Data { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + h.Write([]byte(k)) + h.Write([]byte(configMap.Data[k])) + } + + keys = []string{} + for k, _ := range configMap.BinaryData { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + h.Write([]byte(k)) + h.Write(configMap.BinaryData[k]) + } + + return fmt.Sprintf("%x", h.Sum(nil))[:10] +} + +func GetSecretHashed(ctx context.Context, c client.Client, key client.ObjectKey) (*core.Secret, error) { + base := core.Secret{} + err := c.Get(ctx, key, &base) + if err != nil { + return nil, err + } + secret := &core.Secret{} + secret.TypeMeta = base.TypeMeta + // Intentionally not copying most metadata. + secret.Namespace = base.Namespace + secret.Name = base.Name + "-" + HashSecret(&base) + secret.Type = base.Type + secret.Immutable = &True + secret.Data = base.Data + secret.StringData = base.StringData + return secret, nil +} + +func GetConfigMapHashed(ctx context.Context, c client.Client, key client.ObjectKey) (*core.ConfigMap, error) { + base := core.ConfigMap{} + err := c.Get(ctx, key, &base) + if err != nil { + return nil, err + } + configMap := &core.ConfigMap{} + configMap.TypeMeta = base.TypeMeta + // Intentionally not copying most metadata. + configMap.Namespace = base.Namespace + configMap.Name = base.Name + "-" + HashConfigMap(&base) + configMap.Immutable = &True + configMap.Data = base.Data + configMap.BinaryData = base.BinaryData + return configMap, nil +} diff --git a/src/core/operator/controllers/workspace_airbyte.go b/src/core/operator/controllers/workspace_airbyte.go new file mode 100644 index 00000000..5603cfae --- /dev/null +++ b/src/core/operator/controllers/workspace_airbyte.go @@ -0,0 +1,134 @@ +package controllers + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + core "k8s.io/api/core/v1" + rbac "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . 
"datacoves.com/operator/api/v1" +) + +func (r *WorkspaceReconciler) airbyte(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + + ns := workspace.Namespace + releaseName := workspace.Name + "-airbyte" + gotRelease := HelmRelease{} + err := r.Get(ctx, client.ObjectKey{Namespace: ns, Name: releaseName}, &gotRelease) + if err != nil && !errors.IsNotFound(err) { + return err + } + releaseNotFound := errors.IsNotFound(err) + + // airbyte admin service account + err = reconcileServiceAccount(ctx, r.Client, r.Scheme, workspace, GenSchedulerServiceAccount(workspace)) + if err != nil { + return err + } + err = reconcileRole(ctx, r.Client, r.Scheme, workspace, GenSchedulerRole(workspace)) + if err != nil { + return err + } + err = reconcileRoleBinding(ctx, r.Client, r.Scheme, workspace, GenSchedulerRoleBinding(workspace)) + if err != nil { + return err + } + + if !workspace.ServiceEnabled("airbyte") { + if releaseNotFound { + return nil + } + log.Info("delete HelmRelease", "name", releaseName) + return r.Delete(ctx, &gotRelease) + } + + release := &HelmRelease{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: releaseName, + }, + Spec: HelmReleaseSpec{ + RepoURL: workspace.Spec.Charts["airbyte"]["repo"], + RepoName: workspace.Spec.Charts["airbyte"]["repo_name"], + Chart: workspace.Spec.Charts["airbyte"]["chart"], + Version: workspace.Spec.Charts["airbyte"]["version"], + ValuesName: workspace.Spec.Configs["airbyte-values"], + }, + } + + // If the release is already in the state we want it, do nothing. + if gotRelease.Spec.ValuesName == release.Spec.ValuesName { + return nil + } + + // Set the owner of the deployment, so that it is deleted when the owner is deleted. + err = ctrlu.SetControllerReference(workspace, release, r.Scheme) + if err != nil { + return err + } + if releaseNotFound { + log.Info("create HelmRelease", "name", releaseName) + return r.Create(ctx, release) + } else { + log.Info("update HelmRelease", "name", releaseName) + release.ResourceVersion = gotRelease.ResourceVersion + return r.Update(ctx, release) + } +} + +func GenSchedulerServiceAccount(workspace *Workspace) *core.ServiceAccount { + return &core.ServiceAccount{ + ObjectMeta: v1.ObjectMeta{ + Namespace: workspace.Namespace, + Name: "airbyte-admin", + }, + ImagePullSecrets: []core.LocalObjectReference{ + { + Name: workspace.Spec.ImagePullSecret, + }, + }, + } +} + +func GenSchedulerRole(workspace *Workspace) *rbac.Role { + return &rbac.Role{ + ObjectMeta: v1.ObjectMeta{ + Namespace: workspace.Namespace, + Name: "airbyte-admin-role", + }, + Rules: []rbac.PolicyRule{ + { + APIGroups: []string{"*"}, + Resources: []string{"jobs", "pods", "pods/log", "pods/exec", "pods/attach", "secrets", "configmaps"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + }, + } +} + +func GenSchedulerRoleBinding(workspace *Workspace) *rbac.RoleBinding { + return &rbac.RoleBinding{ + ObjectMeta: v1.ObjectMeta{ + Namespace: workspace.Namespace, + Name: "airbyte-admin-binding", + }, + RoleRef: rbac.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: "airbyte-admin-role", + }, + Subjects: []rbac.Subject{ + { + Kind: "ServiceAccount", + Name: "airbyte-admin", + }, + }, + } +} diff --git a/src/core/operator/controllers/workspace_airflow.go b/src/core/operator/controllers/workspace_airflow.go new file mode 100644 index 00000000..5f277456 --- /dev/null +++ b/src/core/operator/controllers/workspace_airflow.go @@ -0,0 +1,120 @@ +package controllers + +import ( + 
"context" + + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "datacoves.com/operator/api/v1" +) + +func (r *WorkspaceReconciler) airflow(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + ns := workspace.Namespace + releaseName := workspace.Name + "-airflow" + gotRelease := HelmRelease{} + err := r.Get(ctx, client.ObjectKey{Namespace: ns, Name: releaseName}, &gotRelease) + if err != nil && !errors.IsNotFound(err) { + return err + } + releaseNotFound := errors.IsNotFound(err) + + if !workspace.ServiceEnabled("airflow") { + if releaseNotFound { + return nil + } + log.Info("delete HelmRelease", "name", releaseName) + return r.Delete(ctx, &gotRelease) + } + + release := &HelmRelease{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: releaseName, + }, + Spec: HelmReleaseSpec{ + RepoURL: workspace.Spec.Charts["airflow"]["repo"], + RepoName: workspace.Spec.Charts["airflow"]["repo_name"], + Chart: workspace.Spec.Charts["airflow"]["chart"], + Version: workspace.Spec.Charts["airflow"]["version"], + ValuesName: workspace.Spec.Configs["airflow-values"], + }, + } + + // If the release is already in the state we want it, do nothing. + if gotRelease.Spec.ValuesName == release.Spec.ValuesName { + return nil + } + + // Set the owner of the deployment, so that it is deleted when the owner is deleted. + err = ctrlu.SetControllerReference(workspace, release, r.Scheme) + if err != nil { + return err + } + if releaseNotFound { + log.Info("create HelmRelease", "name", releaseName) + return r.Create(ctx, release) + } else { + log.Info("update HelmRelease", "name", releaseName) + release.ResourceVersion = gotRelease.ResourceVersion + return r.Update(ctx, release) + } +} + +func (r *WorkspaceReconciler) airflowPromtail(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + ns := workspace.Namespace + releaseName := workspace.Name + "-airflow-promtail" + gotRelease := HelmRelease{} + err := r.Get(ctx, client.ObjectKey{Namespace: ns, Name: releaseName}, &gotRelease) + if err != nil && !errors.IsNotFound(err) { + return err + } + releaseNotFound := errors.IsNotFound(err) + + if !workspace.ServiceEnabled("airflow") || !workspace.ServiceEnabled("AirflowLogs") { + if releaseNotFound { + return nil + } + log.Info("delete HelmRelease", "name", releaseName) + return r.Delete(ctx, &gotRelease) + } + + release := &HelmRelease{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: releaseName, + }, + Spec: HelmReleaseSpec{ + RepoURL: workspace.Spec.Charts["promtail"]["repo"], + RepoName: workspace.Spec.Charts["promtail"]["repo_name"], + Chart: workspace.Spec.Charts["promtail"]["chart"], + Version: workspace.Spec.Charts["promtail"]["version"], + ValuesName: workspace.Spec.Configs["airflow-promtail-values"], + }, + } + + // If the release is already in the state we want it, do nothing. + if gotRelease.Spec.ValuesName == release.Spec.ValuesName { + return nil + } + + // Set the owner of the deployment, so that it is deleted when the owner is deleted. 
+ err = ctrlu.SetControllerReference(workspace, release, r.Scheme) + if err != nil { + return err + } + if releaseNotFound { + log.Info("create HelmRelease", "name", releaseName) + return r.Create(ctx, release) + } else { + log.Info("update HelmRelease", "name", releaseName) + release.ResourceVersion = gotRelease.ResourceVersion + return r.Update(ctx, release) + } +} diff --git a/src/core/operator/controllers/workspace_controller.go b/src/core/operator/controllers/workspace_controller.go new file mode 100644 index 00000000..1dd07673 --- /dev/null +++ b/src/core/operator/controllers/workspace_controller.go @@ -0,0 +1,388 @@ +package controllers + +import ( + "context" + "fmt" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + "sigs.k8s.io/controller-runtime/pkg/source" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" + + // apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + + // rbac "k8s.io/api/rbac/v1" + + . "datacoves.com/operator/api/v1" +) + +// WorkspaceReconciler reconciles a Workspace object +type WorkspaceReconciler struct { + client.Client + Scheme *runtime.Scheme +} + +// Service describes a service within the workspace (e.g. airbyte, code-server). +// Ingress rules and pomerium configuration is derived from these records. +type Service struct { + Kind string // The kind of service. Values are the same as workspace.Services keys. + Name string // Name of the kubernetes service resource. + Host string // Host/domain where the service is externally accessible. + DomainPrefix string // Prefix used in the domain. + PathPrefix string // https://www.pomerium.com/reference/#prefix + User *UserSpec // Who this service is for, or nil if the resource is namespace wide. + + Selector map[string]string // If not set, {"app": service.Name} is used. + Port int32 + TargetPort intstr.IntOrString + Exists bool // If existing, it means it won't create the service + Websockets bool + AllowAnyAuthenticatedUser bool + AllowPublicUnauthenticatedAccess bool + PreserveHostHeader bool + ProxyInterceptErrors bool +} + +// SetupWithManager sets up the controller with the Manager. +func (r *WorkspaceReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&Workspace{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Owns(&User{}). + Owns(&HelmRelease{}). + // NOTE: We could watch the owned resources we create too + // (e.g. Deployments) and that way the operator would try to reset any + // modifications to them. The problem is that our own updates to those + // resources also trigger reconciliation, and it can be hard to program + // in a way that doesn't go into an infinite update-reconcile loop. + // Reconciliation already has to be idempotent. To watch owned+created + // resources we would also have to make it achieve a fixed point. + // So we don't do that. We assume we are the only ones touching the + // resources we create, and we are not triggered by changes to them. 
+ // NOTE: Watching the secrets and configmaps we depend on means that + // when multiple resources are modified at once (by workspace.py), then + // multiple reconciliations are triggered. This works OK. It could be + // better to watch only the Workspace resource and have it reference + // immutable secrets and configmaps so that django can create all the + // new secrets and update the Workspace once, triggering a single reconcile. + Watches( + &source.Kind{Type: &core.Secret{}}, + handler.EnqueueRequestsFromMapFunc(r.enqueueForWatched), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Watches( + &source.Kind{Type: &core.ConfigMap{}}, + handler.EnqueueRequestsFromMapFunc(r.enqueueForWatched), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + // As long as we have 1 workspace this parameter improves nothing, so + // let's not risk it for now. + // WithOptions(controller.Options{MaxConcurrentReconciles: 12}). + Complete(r) +} + +// When a watched obj changes, trigger a reconcile if the obj has a workspace annotation. +func (r *WorkspaceReconciler) enqueueForWatched(obj client.Object) []reconcile.Request { + ns := obj.GetNamespace() + annots := obj.GetAnnotations() + if workspaceName, found := annots["datacoves.com/workspace"]; found && workspaceName != "" { + workspace := Workspace{} + err := r.Get(context.TODO(), client.ObjectKey{Namespace: ns, Name: workspaceName}, &workspace) + if err != nil { + // If we can't get a workspace with that name, don't reconcile. + return []reconcile.Request{} + } + + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{ + Name: workspace.Name, + Namespace: workspace.Namespace, + }, + }, + } + } + return []reconcile.Request{} +} + +//+kubebuilder:rbac:groups=datacoves.com,resources=workspaces,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=datacoves.com,resources=workspaces/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=datacoves.com,resources=workspaces/finalizers,verbs=update + +//+kubebuilder:rbac:groups=datacoves.com,resources=users,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=datacoves.com,resources=users/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=datacoves.com,resources=users/finalizers,verbs=update + +//+kubebuilder:rbac:groups=apps,resources=deployments;daemonsets;replicasets;statefulsets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=networking.k8s.io,resources=ingresses;networkpolicies,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings;clusterroles;clusterrolebindings,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups="",resources=pods;services;persistentvolumeclaims;configmaps;secrets;serviceaccounts;namespaces;events,verbs=get;list;watch;create;update;patch;delete + +// For airbyte-admin service account... 
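+// (these broad rules are presumably required so the operator can grant them to
+// the airbyte-admin Role it creates in workspace_airbyte.go: Kubernetes only
+// lets a client create a Role whose permissions it already holds, unless it is
+// allowed to escalate)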
+//+kubebuilder:rbac:groups=*,resources=cronjobs;jobs;pods;pods/attach;pods/exec;pods/log,verbs=get;list;watch;create;update;patch;delete
+//+kubebuilder:rbac:groups=autoscaling,resources=horizontalpodautoscalers,verbs=create;delete;get;list;patch;update;watch
+//+kubebuilder:rbac:groups=policy,resources=poddisruptionbudgets,verbs=create;delete;get;list;patch;update;watch
+
+func (r *WorkspaceReconciler) Reconcile(ctx context.Context, req ctrl.Request) (res ctrl.Result, err error) {
+	ns := core.Namespace{}
+	err = r.Get(ctx, client.ObjectKey{Name: req.Namespace}, &ns)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			err = nil
+		}
+		return
+	}
+	if !ns.DeletionTimestamp.IsZero() {
+		// Do nothing if the namespace is being deleted.
+		return
+	}
+
+	got := Workspace{}
+	workspace := &got
+	err = r.Get(ctx, req.NamespacedName, workspace)
+	if err != nil {
+		if errors.IsNotFound(err) {
+			// The workspace no longer exists. There is nothing to do and nothing has
+			// failed so we must return without an error (or we would be retried).
+			err = nil
+		}
+		return
+	}
+	if !workspace.DeletionTimestamp.IsZero() {
+		// Do nothing if the workspace is being deleted.
+		return
+	}
+
+	// Add the workspace name to every log call from this reconcile.
+	logger := log.FromContext(ctx).WithName(workspace.Name)
+	ctx = log.IntoContext(ctx, logger)
+	log := log.FromContext(ctx)
+	log.Info("reconciling", "generation", workspace.Generation)
+	defer func() {
+		if err == nil {
+			log.Info("reconciled", "generation", workspace.Generation)
+		}
+	}()
+
+	services := genServices(workspace)
+
+	err = r.networkPolicies(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: network policies")
+		return
+	}
+
+	err = r.services(ctx, workspace, services)
+	if err != nil {
+		log.Error(err, "error in stage: services")
+		return
+	}
+
+	err = r.ingress(ctx, workspace, services)
+	if err != nil {
+		log.Error(err, "error in stage: ingress")
+		return
+	}
+
+	err = addImagePullSecretToDefaultServiceAccount(ctx, r.Client, workspace.Namespace, workspace.Spec.ImagePullSecret)
+	if err != nil {
+		log.Error(err, "error in stage: imagePullSecret")
+		return
+	}
+
+	err = r.pomerium(ctx, workspace, services)
+	if err != nil {
+		log.Error(err, "error in stage: pomerium")
+		return
+	}
+
+	err = r.users(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: users")
+		return
+	}
+
+	err = r.superset(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: superset")
+		return
+	}
+
+	err = r.minio(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: minio")
+		return
+	}
+
+	err = r.elastic(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: elastic")
+		return
+	}
+
+	err = r.neo4j(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: neo4j")
+		return
+	}
+
+	err = r.postgresql(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: postgresql")
+		return
+	}
+
+	err = r.kafka(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: kafka")
+		return
+	}
+
+	err = r.datahub(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: datahub")
+		return
+	}
+
+	err = r.airbyte(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: airbyte")
+		return
+	}
+
+	err = r.dbtDocs(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: dbt docs")
+		return
+	}
+
+	err = r.airflow(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: airflow")
+		return
+	}
+
+	err = r.airflowPromtail(ctx, workspace)
+	if err != nil {
+		log.Error(err, "error in stage: airflowPromtail")
+		return
+	}
+
+	err = nil
+	return
+}
+
+func (r *WorkspaceReconciler) users(ctx context.Context, workspace *Workspace) error {
+	// Get existing users and put them into the users map.
+	userList := UserList{}
+	err := r.List(ctx, &userList, &client.ListOptions{Namespace: workspace.Namespace})
+	if err != nil && !errors.IsNotFound(err) {
+		return fmt.Errorf("users list: %w", err)
+	}
+	users := map[string]User{}
+	for _, user := range userList.Items {
+		users[user.Name] = user
+	}
+
+	// Create or update users comparing existing users with the workspace spec.
+	for _, spec := range workspace.Spec.Users {
+		user, exists := users[spec.Slug]
+		if !exists || !user.Spec.Equals(spec) {
+			err = r.user(ctx, workspace, spec)
+			if err != nil {
+				return fmt.Errorf("user (%s): %w", spec.Slug, err)
+			}
+		}
+		// Remove the user from the users map so that when this for loop ends, the
+		// ones that remain are those not in the workspace spec.
+		delete(users, spec.Slug)
+	}
+
+	log := log.FromContext(ctx)
+
+	// Delete users not in the spec.
+	// TODO: Sort users before iterating to avoid infinite reconcile retries.
+	// Random range iteration order + controller runtime retries = trouble.
+	for _, user := range users {
+		log.Info("delete User", "name", user.Name)
+		err = r.Delete(ctx, &user)
+		if err != nil && !errors.IsNotFound(err) {
+			return fmt.Errorf("user delete: %w", err)
+		}
+	}
+
+	return nil
+}
+
+func (r *WorkspaceReconciler) user(ctx context.Context, workspace *Workspace, spec UserSpec) error {
+	name := spec.Slug
+
+	user := &User{
+		ObjectMeta: v1.ObjectMeta{Namespace: workspace.GetNamespace(), Name: name},
+		Spec:       spec,
+	}
+
+	// Set the owner of the User, so that it is deleted when the owner is deleted.
+	err := ctrlu.SetControllerReference(workspace, user, r.Scheme)
+	if err != nil {
+		return err
+	}
+
+	log := log.FromContext(ctx)
+
+	got := User{}
+	err = r.Get(ctx, client.ObjectKeyFromObject(user), &got)
+	if err != nil && !errors.IsNotFound(err) {
+		return err
+	}
+	if errors.IsNotFound(err) {
+		log.Info("create User", "name", name)
+		return r.Create(ctx, user)
+	}
+	user.ResourceVersion = got.ResourceVersion
+	log.Info("update User", "name", name)
+	return r.Update(ctx, user)
+}
+
+// addImagePullSecretToDefaultServiceAccount sets the image pull secret of the
+// workspace on the namespace's default service account, so all pods in the
+// namespace can use it to pull images.
+func addImagePullSecretToDefaultServiceAccount(ctx context.Context, c client.Client, namespace, secretName string) error {
+	if secretName == "" {
+		// TODO: Review. Might change imagePullSecret to be required and return
+		// an error here.
+ return nil + } + + sa := core.ServiceAccount{} + err := c.Get(ctx, client.ObjectKey{Name: "default", Namespace: namespace}, &sa) + if err != nil { + return err + } + + found := false + for _, ips := range sa.ImagePullSecrets { + if ips.Name == secretName { + found = true + break + } + } + + if !found { + // TODO: https://aws.github.io/aws-eks-best-practices/security/docs/iam/#disable-auto-mounting-of-service-account-tokens + // sa.AutomountServiceAccountToken = &u.False + sa.ImagePullSecrets = append(sa.ImagePullSecrets, core.LocalObjectReference{ + Name: secretName, + }) + return c.Update(ctx, &sa) + } + + return nil +} diff --git a/src/core/operator/controllers/workspace_datahub.go b/src/core/operator/controllers/workspace_datahub.go new file mode 100644 index 00000000..346626e2 --- /dev/null +++ b/src/core/operator/controllers/workspace_datahub.go @@ -0,0 +1,67 @@ +package controllers + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "datacoves.com/operator/api/v1" +) + +func (r *WorkspaceReconciler) datahub(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + ns := workspace.Namespace + releaseName := workspace.Name + "-datahub" + gotRelease := HelmRelease{} + err := r.Get(ctx, client.ObjectKey{Namespace: ns, Name: releaseName}, &gotRelease) + if err != nil && !errors.IsNotFound(err) { + return err + } + releaseNotFound := errors.IsNotFound(err) + + if !workspace.ServiceEnabled("datahub") { + if releaseNotFound { + return nil + } + log.Info("delete HelmRelease", "name", releaseName) + return r.Delete(ctx, &gotRelease) + } + + release := &HelmRelease{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: releaseName, + }, + Spec: HelmReleaseSpec{ + RepoURL: workspace.Spec.Charts["datahub"]["repo"], + RepoName: workspace.Spec.Charts["datahub"]["repo_name"], + Chart: workspace.Spec.Charts["datahub"]["chart"], + Version: workspace.Spec.Charts["datahub"]["version"], + ValuesName: workspace.Spec.Configs["datahub-values"], + }, + } + + // If the release is already in the state we want it, do nothing. + if gotRelease.Spec.ValuesName == release.Spec.ValuesName { + return nil + } + + // Set the owner of the deployment, so that it is deleted when the owner is deleted. + err = ctrlu.SetControllerReference(workspace, release, r.Scheme) + if err != nil { + return err + } + if releaseNotFound { + log.Info("create HelmRelease", "name", releaseName) + return r.Create(ctx, release) + } else { + log.Info("update HelmRelease", "name", releaseName) + release.ResourceVersion = gotRelease.ResourceVersion + return r.Update(ctx, release) + } +} diff --git a/src/core/operator/controllers/workspace_dbt_docs.go b/src/core/operator/controllers/workspace_dbt_docs.go new file mode 100644 index 00000000..06bec36e --- /dev/null +++ b/src/core/operator/controllers/workspace_dbt_docs.go @@ -0,0 +1,265 @@ +package controllers + +import ( + "context" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/log" + + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + + . 
"datacoves.com/operator/api/v1" + u "datacoves.com/operator/controllers/utils" +) + +func (r *WorkspaceReconciler) dbtDocs(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + + if !workspace.ServiceEnabled("dbt-docs") { + err := deleteDeployment(ctx, r.Client, workspace.Namespace, "dbt-docs") + return err + } + + secretsName := workspace.Spec.Configs["dbt-docs-git-sync-secrets"] + + deployment := genDbtDocsDeployment(workspace, secretsName) + err := reconcileDeployment(ctx, r.Client, r.Scheme, workspace, deployment) + if err != nil { + // Way to apply changes to immutable fields + log.Error(err, "deployment could not be created or updated") + log.Info("trying to recreate deployment:", "deplopyment", "dbt-docs") + err := deleteDeployment(ctx, r.Client, workspace.Namespace, "dbt-docs") + if err != nil { + return err + } + err = reconcileDeployment(ctx, r.Client, r.Scheme, workspace, deployment) + if err != nil { + return err + } + + return err + } + + return err +} + +func genDbtDocsDeployment(workspace *Workspace, gitSyncSecretsName string) *apps.Deployment { + name := "dbt-docs" + + labels := map[string]string{ + "app": name, + "datacoves.com/adapter": name, + } + + htmlVolumeName := "dbt-docs-volume" + + meta := v1.ObjectMeta{ + Name: name, + Namespace: workspace.Namespace, + Labels: labels, + } + + gitRepo := workspace.Spec.SshGitRepo + gitSyncSsh := "true" + + if workspace.Spec.GitCloneStrategy != "ssh_clone" { + gitRepo = workspace.Spec.HttpGitRepo + gitSyncSsh = "false" + } + + gitRepoParts := strings.Split(gitRepo, "/") + gitRepoPath := gitRepoParts[len(gitRepoParts)-1] + + dbtDocsContainer := core.Container{ + Name: "dbt-docs", + Image: workspace.ImageName("datacovesprivate/observe-dbt-docs"), + ImagePullPolicy: core.PullIfNotPresent, + Ports: []core.ContainerPort{{ContainerPort: 80, Protocol: core.ProtocolTCP, Name: "http"}}, + LivenessProbe: &core.Probe{ + ProbeHandler: core.ProbeHandler{ + HTTPGet: &core.HTTPGetAction{ + Path: "/healthz", + Port: intstr.IntOrString{ + Type: intstr.Type(1), + IntVal: 0, + StrVal: "http", + }, + }, + }, + InitialDelaySeconds: 60, + TimeoutSeconds: 1, + PeriodSeconds: 10, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + ReadinessProbe: &core.Probe{ + ProbeHandler: core.ProbeHandler{ + HTTPGet: &core.HTTPGetAction{ + Path: "/healthz", + Port: intstr.IntOrString{ + Type: intstr.Type(1), + IntVal: 0, + StrVal: "http", + }, + }, + }, + InitialDelaySeconds: 5, + TimeoutSeconds: 1, + PeriodSeconds: 5, + SuccessThreshold: 1, + FailureThreshold: 3, + }, + Env: u.Env{}. + Set("DBT_DOCS_GIT_PATH", gitRepoPath), + VolumeMounts: []core.VolumeMount{ + { + Name: htmlVolumeName, + MountPath: "/usr/share/nginx/html/repo", + ReadOnly: true, + }, + }, + } + + gitSyncImage := workspace.ImageName("registry.k8s.io/git-sync/git-sync") + gitSyncImageParts := strings.Split(gitSyncImage, ":") + gitSyncImageTag := gitSyncImageParts[len(gitSyncImageParts)-1] + gitSyncRoot := "/git" + + // To avoid duplicates of "refs/heads/" in the branch name + gitBranch := "refs/heads/" + strings.Replace(workspace.Spec.DbtDocsGitBranch, "refs/heads/", "", 1) + env := u.Env{} + + if strings.HasPrefix(gitSyncImageTag, "v3") { + // git-sync v3 + env = env. + Set("GIT_SYNC_REPO", gitRepo). + Set("GIT_SYNC_SSH", gitSyncSsh). + Set("GIT_SYNC_BRANCH", gitBranch). + Set("GIT_KNOWN_HOSTS", "false"). + Set("GIT_SYNC_DEPTH", "1"). + Set("GIT_PATH_CLONE", "300"). + Set("GIT_SYNC_ROOT", gitSyncRoot). 
+ Set("GIT_SYNC_MAX_SYNC_FAILURES", "5") + + if len(workspace.Spec.DbtDocsAskpassUrl) > 0 { + env = env. + Set("GIT_SYNC_ASKPASS_URL", workspace.Spec.DbtDocsAskpassUrl) + } + } else { + // git-sync v4 + env = env. + Set("GITSYNC_REPO", gitRepo). + Set("GIT_SYNC_SSH", gitSyncSsh). + Set("GITSYNC_REF", gitBranch). + Set("GITSYNC_SSH_KNOWN_HOSTS", "false"). + Set("GITSYNC_DEPTH", "1"). + Set("GITSYNC_SYNC_TIMEOUT", "300s"). + Set("GITSYNC_ROOT", gitSyncRoot). + Set("GITSYNC_MAX_FAILURES", "5") + + if len(workspace.Spec.DbtDocsAskpassUrl) > 0 { + env = env. + Set("GITSYNC_ASKPASS_URL", workspace.Spec.DbtDocsAskpassUrl) + } + } + + volumeMounts := []core.VolumeMount{ + { + Name: htmlVolumeName, + MountPath: gitSyncRoot, + }, + } + + htmlVolumeLimit := resource.MustParse("12Gi") + volumes := []core.Volume{ + { + Name: htmlVolumeName, + VolumeSource: core.VolumeSource{ + EmptyDir: &core.EmptyDirVolumeSource{ + SizeLimit: &htmlVolumeLimit, + }, + }, + }, + } + + if gitSyncSsh == "false" { + if len(workspace.Spec.DbtDocsAskpassUrl) == 0 { + if strings.HasPrefix(gitSyncImageTag, "v3") { + // git-sync v3 + env = env.AddFromSecret(gitSyncSecretsName, + "GIT_SYNC_USERNAME", + "GIT_SYNC_PASSWORD", + ) + } else { + // git-sync v4 + env = env.AddFromSecret(gitSyncSecretsName, + "GITSYNC_USERNAME", + "GITSYNC_PASSWORD", + ) + } + } + } else { + sshKeyVolumeName := "ssh-key-volume" + volumeMounts = append(volumeMounts, core.VolumeMount{ + Name: sshKeyVolumeName, + MountPath: "/etc/git-secret", + }) + volumes = append(volumes, core.Volume{ + Name: sshKeyVolumeName, + VolumeSource: core.VolumeSource{ + Secret: &core.SecretVolumeSource{ + SecretName: gitSyncSecretsName, + Items: []core.KeyToPath{ + {Key: "gitSshKey", Path: "ssh"}, + }, + DefaultMode: &u.Int32_0o644, + }, + }, + }) + } + + gitSyncContainer := core.Container{ + Name: "git-sync", + Image: gitSyncImage, + ImagePullPolicy: core.PullIfNotPresent, + Env: env, + VolumeMounts: volumeMounts, + } + + if resReqs, ok := workspace.Spec.ResourceRequirements["dbt-docs"]; ok { + dbtDocsContainer.Resources = resReqs + gitSyncContainer.Resources = resReqs + } + + containers := []core.Container{ + dbtDocsContainer, + gitSyncContainer, + } + + deployment := &apps.Deployment{ + ObjectMeta: meta, + Spec: apps.DeploymentSpec{ + Selector: &v1.LabelSelector{MatchLabels: labels}, + Replicas: &u.Int32_1, + Template: core.PodTemplateSpec{ + ObjectMeta: meta, + Spec: core.PodSpec{ + NodeSelector: u.GeneralNodeSelector, + Containers: containers, + Volumes: volumes, + // TODO: Review running pod as root, if we switch to SSH. + // https://github.com/kubernetes/git-sync/blob/release-3.x/docs/ssh.md + // SecurityContext: &core.PodSecurityContext{ + // RunAsUser: &u.Int64_0, + // }, + }, + }, + }, + } + + return deployment +} diff --git a/src/core/operator/controllers/workspace_elastic.go b/src/core/operator/controllers/workspace_elastic.go new file mode 100644 index 00000000..b3fe57d7 --- /dev/null +++ b/src/core/operator/controllers/workspace_elastic.go @@ -0,0 +1,67 @@ +package controllers + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . 
"datacoves.com/operator/api/v1" +) + +func (r *WorkspaceReconciler) elastic(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + ns := workspace.Namespace + releaseName := workspace.Name + "-elastic" + gotRelease := HelmRelease{} + err := r.Get(ctx, client.ObjectKey{Namespace: ns, Name: releaseName}, &gotRelease) + if err != nil && !errors.IsNotFound(err) { + return err + } + releaseNotFound := errors.IsNotFound(err) + + if !workspace.InternalServiceEnabled("elastic") { + if releaseNotFound { + return nil + } + log.Info("delete HelmRelease", "name", releaseName) + return r.Delete(ctx, &gotRelease) + } + + release := &HelmRelease{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: releaseName, + }, + Spec: HelmReleaseSpec{ + RepoURL: workspace.Spec.Charts["elastic"]["repo"], + RepoName: workspace.Spec.Charts["elastic"]["repo_name"], + Chart: workspace.Spec.Charts["elastic"]["chart"], + Version: workspace.Spec.Charts["elastic"]["version"], + ValuesName: workspace.Spec.Configs["elastic-values"], + }, + } + + // If the release is already in the state we want it, do nothing. + if gotRelease.Spec.ValuesName == release.Spec.ValuesName { + return nil + } + + // Set the owner of the deployment, so that it is deleted when the owner is deleted. + err = ctrlu.SetControllerReference(workspace, release, r.Scheme) + if err != nil { + return err + } + if releaseNotFound { + log.Info("create HelmRelease", "name", releaseName) + return r.Create(ctx, release) + } else { + log.Info("update HelmRelease", "name", releaseName) + release.ResourceVersion = gotRelease.ResourceVersion + return r.Update(ctx, release) + } +} diff --git a/src/core/operator/controllers/workspace_ingress.go b/src/core/operator/controllers/workspace_ingress.go new file mode 100644 index 00000000..bac946e0 --- /dev/null +++ b/src/core/operator/controllers/workspace_ingress.go @@ -0,0 +1,376 @@ +package controllers + +import ( + "context" + "os" + "strconv" + "strings" + + "sigs.k8s.io/controller-runtime/pkg/client" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + // apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + networking "k8s.io/api/networking/v1" + + // rbac "k8s.io/api/rbac/v1" + + . "datacoves.com/operator/api/v1" + u "datacoves.com/operator/controllers/utils" +) + +// genServices generates all the workspace services. 
+func genServices(workspace *Workspace) []Service { + workbenchPort := 80 + workbenchWebsockets := false + if os.Getenv("LOCAL_WORKBENCH_IMAGE") != "" { + workbenchPort = 3000 + workbenchWebsockets = true + } + + services := []Service{ + { + Kind: "pomerium", + Name: "pomerium", + DomainPrefix: "authenticate", + }, + { + Kind: "workbench", + Name: "core-workbench-svc.core", + DomainPrefix: "", + TargetPort: intstr.FromInt(workbenchPort), + Websockets: workbenchWebsockets, + AllowAnyAuthenticatedUser: true, + Exists: true, + ProxyInterceptErrors: true, + }, + } + + if workspace.ServiceEnabled("airbyte") { + services = append(services, Service{ + Kind: "airbyte", + Name: workspace.Name + "-airbyte-airbyte-webapp-svc", + DomainPrefix: "airbyte", + Websockets: true, + Exists: true, + ProxyInterceptErrors: true, + }) + } + + if workspace.ServiceEnabled("superset") { + services = append(services, Service{ + Kind: "superset", + Name: workspace.Name + "-superset", + DomainPrefix: "superset", + TargetPort: intstr.FromInt(8088), + Port: 8088, + Exists: true, + ProxyInterceptErrors: true, + }) + } + + if workspace.ServiceEnabled("airflow") { + services = append(services, Service{ + Kind: "airflow", + Name: workspace.Name + "-airflow-webserver", + DomainPrefix: "airflow", + TargetPort: intstr.FromInt(8080), + Port: 8080, + Exists: true, + ProxyInterceptErrors: true, + }) + services = append(services, Service{ + Kind: "airflow", + Name: workspace.Name + "-airflow-webserver", + DomainPrefix: "api-airflow", + PathPrefix: "/api", + TargetPort: intstr.FromInt(8080), + Port: 8080, + Exists: true, + AllowPublicUnauthenticatedAccess: true, + ProxyInterceptErrors: false, + }) + } + + if workspace.ServiceEnabled("dbt-docs") { + services = append(services, Service{ + Kind: "dbt-docs", + Name: "dbt-docs", + DomainPrefix: "dbt-docs", + AllowAnyAuthenticatedUser: true, + ProxyInterceptErrors: true, + }) + } + + if workspace.ServiceEnabled("datahub") { + services = append(services, Service{ + Kind: "datahub", + Name: workspace.Name + "-datahub-datahub-frontend", + DomainPrefix: "datahub", + TargetPort: intstr.FromInt(9002), + Port: 9002, + Exists: true, + ProxyInterceptErrors: true, + }) + } + + for i, user := range workspace.Spec.Users { + if workspace.ServiceEnabled("code-server") && user.HasPermissionForService("code-server") { + services = append(services, + Service{ + Kind: "code-server", + Name: "code-server-" + user.Slug, + DomainPrefix: user.Slug + "-transform", // TODO: Rename to user.Slug + "-code", + User: &workspace.Spec.Users[i], + TargetPort: intstr.FromInt(codeServerPort), + Websockets: true, + ProxyInterceptErrors: false, + }, + Service{ + Kind: "code-server", // Considered part of code-server. + Name: "dbt-docs-" + user.Slug, + Selector: map[string]string{"app": "code-server-" + user.Slug}, + DomainPrefix: user.Slug + "-dbt-docs", + User: &workspace.Spec.Users[i], + ProxyInterceptErrors: true, + }, + ) + + if user.IsLocalAirflowEnabled() { + services = append(services, + Service{ + Kind: "code-server", // Considered part of code-server. 
+ Name: "airflow-" + user.Slug, + Selector: map[string]string{"app": "code-server-" + user.Slug}, + DomainPrefix: user.Slug + "-airflow", + User: &workspace.Spec.Users[i], + TargetPort: intstr.FromInt(localAirflowPort), + Port: localAirflowPort, + PreserveHostHeader: true, + ProxyInterceptErrors: true, + }, + ) + } + + if user.CodeServerAccess != "private" { + sharedCodeServerService := Service{ + Kind: "code-server", + Name: "shared-code-server-" + user.Slug, + Selector: map[string]string{"app": "code-server-" + user.Slug}, + DomainPrefix: user.CodeServerShareCode, + User: &workspace.Spec.Users[i], + TargetPort: intstr.FromInt(codeServerPort), + Websockets: true, + ProxyInterceptErrors: false, + } + if user.CodeServerAccess == "authenticated" { + sharedCodeServerService.AllowAnyAuthenticatedUser = true + } else if user.CodeServerAccess == "public" { + sharedCodeServerService.AllowPublicUnauthenticatedAccess = true + } + services = append(services, + sharedCodeServerService, + ) + } + + for exposureKey, exposureOptions := range user.CodeServerExposures { + iport, err := strconv.Atoi(exposureOptions["port"]) + if err == nil { + service := Service{ + Kind: "code-server", + Selector: map[string]string{"app": "code-server-" + user.Slug}, + Name: "code-server-" + exposureKey + "-" + user.Slug, + DomainPrefix: exposureOptions["share_code"], + User: &workspace.Spec.Users[i], + Port: int32(iport), + TargetPort: intstr.FromInt(iport), + PreserveHostHeader: true, + ProxyInterceptErrors: true, + } + if user.CodeServerAccess == "authenticated" || exposureOptions["access"] == "authenticated" { + service.AllowAnyAuthenticatedUser = true + } else if user.CodeServerAccess == "public" || exposureOptions["access"] == "public" { + service.AllowPublicUnauthenticatedAccess = true + } + if exposureOptions["websockets"] == "true" { + service.Websockets = true + } + services = append(services, + service, + ) + } + } + } + + } + + for i, service := range services { + prefix := "" + if service.DomainPrefix != "" { + prefix = service.DomainPrefix + "-" + } + services[i].Host = prefix + workspace.Name + "." + workspace.Spec.ClusterDomain + } + + return services +} + +func (r *WorkspaceReconciler) services(ctx context.Context, workspace *Workspace, services []Service) error { + for _, service := range services { + if service.Exists { + continue + } + err := reconcileService(ctx, r.Client, r.Scheme, workspace, genService(workspace, &service)) + if err != nil { + return err + } + } + return nil +} + +func genService(workspace *Workspace, service *Service) *core.Service { + port := int32(80) + if service.Port != 0 { + port = service.Port + } + + targetPort := intstr.FromInt(80) + if service.TargetPort != intstr.FromInt(0) { + targetPort = service.TargetPort + } + + selector := service.Selector + if selector == nil { + selector = map[string]string{"app": service.Name} + } + + return &core.Service{ + ObjectMeta: v1.ObjectMeta{ + Name: service.Name, + Namespace: workspace.Namespace, + }, + Spec: core.ServiceSpec{ + Selector: selector, + Ports: []core.ServicePort{ + { + Protocol: core.ProtocolTCP, + Port: port, + TargetPort: targetPort, + }, + }, + }, + } +} + +// ingress reconciles the workspace's ingress rules. 
+func (r *WorkspaceReconciler) ingress(ctx context.Context, workspace *Workspace, services []Service) error { + ingress := genIngress(workspace, services, false) + ingressFound := networking.Ingress{} + err := r.Get(ctx, client.ObjectKeyFromObject(ingress), &ingressFound) + + if err != nil && errors.IsNotFound(err) { + err = r.Create(ctx, ingress) + } else if err != nil { + return err + } else { + err = r.Update(ctx, ingress) + } + + if err != nil { + return err + } + + ingress = genIngress(workspace, services, true) + ingressFound = networking.Ingress{} + err = r.Get(ctx, client.ObjectKeyFromObject(ingress), &ingressFound) + + if err != nil && errors.IsNotFound(err) { + return r.Create(ctx, ingress) + } else if err != nil { + return err + } else { + return r.Update(ctx, ingress) + } +} + +func genIngress(workspace *Workspace, services []Service, isIngressBackend bool) *networking.Ingress { + rules := []networking.IngressRule{} + tls := []networking.IngressTLS{} + + for _, service := range services { + host := service.Host + rule := networking.IngressRule{ + Host: host, + IngressRuleValue: networking.IngressRuleValue{ + HTTP: &networking.HTTPIngressRuleValue{ + Paths: []networking.HTTPIngressPath{ + { + Path: "/", + PathType: &u.PathTypePrefix, + Backend: networking.IngressBackend{ + Service: &networking.IngressServiceBackend{ + Name: "pomerium", + Port: networking.ServiceBackendPort{Number: 80}, + }, + }, + }, + }, + }, + }, + } + + if isIngressBackend && !service.ProxyInterceptErrors { + rules = append(rules, rule) + } else if !isIngressBackend && service.ProxyInterceptErrors { + rules = append(rules, rule) + } + + if workspace.Spec.CertManagerIssuer != "" { + tls = append(tls, networking.IngressTLS{ + Hosts: []string{host}, + SecretName: strings.ReplaceAll(host, ".", "-"), + }) + } + } + + annotations := map[string]string{ + "nginx.ingress.kubernetes.io/force-ssl-redirect": "true", + "nginx.ingress.kubernetes.io/ssl-redirect": "true", + } + + if !isIngressBackend { + var errorPages = "error_page 404 500 502 503 504 \"https://cdn." + workspace.Spec.ClusterDomain + "/service/down/\";" + annotations["nginx.ingress.kubernetes.io/server-snippet"] = "proxy_intercept_errors on; " + errorPages + } + + if workspace.Spec.CertManagerIssuer != "" { + annotations["cert-manager.io/cluster-issuer"] = workspace.Spec.CertManagerIssuer + } + + dnsUrl := workspace.Spec.ExternalDnsUrl + if dnsUrl != "" { + annotations["external-dns.alpha.kubernetes.io/alias"] = "true" + annotations["external-dns.alpha.kubernetes.io/target"] = dnsUrl + } + + ingressName := "workspace-ingress" + if isIngressBackend { + ingressName = "workspace-ingress" + "-backend" + } + + return &networking.Ingress{ + ObjectMeta: v1.ObjectMeta{ + Namespace: workspace.Namespace, + Name: ingressName, + Annotations: annotations, + }, + Spec: networking.IngressSpec{ + Rules: rules, + TLS: tls, + IngressClassName: &u.StrNginx, + }, + } +} diff --git a/src/core/operator/controllers/workspace_kafka.go b/src/core/operator/controllers/workspace_kafka.go new file mode 100644 index 00000000..32a788ee --- /dev/null +++ b/src/core/operator/controllers/workspace_kafka.go @@ -0,0 +1,67 @@ +package controllers + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . 
"datacoves.com/operator/api/v1" +) + +func (r *WorkspaceReconciler) kafka(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + ns := workspace.Namespace + releaseName := workspace.Name + "-kafka" + gotRelease := HelmRelease{} + err := r.Get(ctx, client.ObjectKey{Namespace: ns, Name: releaseName}, &gotRelease) + if err != nil && !errors.IsNotFound(err) { + return err + } + releaseNotFound := errors.IsNotFound(err) + + if !workspace.InternalServiceEnabled("kafka") { + if releaseNotFound { + return nil + } + log.Info("delete HelmRelease", "name", releaseName) + return r.Delete(ctx, &gotRelease) + } + + release := &HelmRelease{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: releaseName, + }, + Spec: HelmReleaseSpec{ + RepoURL: workspace.Spec.Charts["kafka"]["repo"], + RepoName: workspace.Spec.Charts["kafka"]["repo_name"], + Chart: workspace.Spec.Charts["kafka"]["chart"], + Version: workspace.Spec.Charts["kafka"]["version"], + ValuesName: workspace.Spec.Configs["kafka-values"], + }, + } + + // If the release is already in the state we want it, do nothing. + if gotRelease.Spec.ValuesName == release.Spec.ValuesName { + return nil + } + + // Set the owner of the deployment, so that it is deleted when the owner is deleted. + err = ctrlu.SetControllerReference(workspace, release, r.Scheme) + if err != nil { + return err + } + if releaseNotFound { + log.Info("create HelmRelease", "name", releaseName) + return r.Create(ctx, release) + } else { + log.Info("update HelmRelease", "name", releaseName) + release.ResourceVersion = gotRelease.ResourceVersion + return r.Update(ctx, release) + } +} diff --git a/src/core/operator/controllers/workspace_minio.go b/src/core/operator/controllers/workspace_minio.go new file mode 100644 index 00000000..da5b4e1f --- /dev/null +++ b/src/core/operator/controllers/workspace_minio.go @@ -0,0 +1,67 @@ +package controllers + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "datacoves.com/operator/api/v1" +) + +func (r *WorkspaceReconciler) minio(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + ns := workspace.Namespace + releaseName := workspace.Name + "-minio" + gotRelease := HelmRelease{} + err := r.Get(ctx, client.ObjectKey{Namespace: ns, Name: releaseName}, &gotRelease) + if err != nil && !errors.IsNotFound(err) { + return err + } + releaseNotFound := errors.IsNotFound(err) + + if !workspace.InternalServiceEnabled("minio") { + if releaseNotFound { + return nil + } + log.Info("delete HelmRelease", "name", releaseName) + return r.Delete(ctx, &gotRelease) + } + + release := &HelmRelease{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: releaseName, + }, + Spec: HelmReleaseSpec{ + RepoURL: workspace.Spec.Charts["minio"]["repo"], + RepoName: workspace.Spec.Charts["minio"]["repo_name"], + Chart: workspace.Spec.Charts["minio"]["chart"], + Version: workspace.Spec.Charts["minio"]["version"], + ValuesName: workspace.Spec.Configs["minio-values"], + }, + } + + // If the release is already in the state we want it, do nothing. + if gotRelease.Spec.ValuesName == release.Spec.ValuesName { + return nil + } + + // Set the owner of the deployment, so that it is deleted when the owner is deleted. 
+ err = ctrlu.SetControllerReference(workspace, release, r.Scheme) + if err != nil { + return err + } + if releaseNotFound { + log.Info("create HelmRelease", "name", releaseName) + return r.Create(ctx, release) + } else { + log.Info("update HelmRelease", "name", releaseName) + release.ResourceVersion = gotRelease.ResourceVersion + return r.Update(ctx, release) + } +} diff --git a/src/core/operator/controllers/workspace_neo4j.go b/src/core/operator/controllers/workspace_neo4j.go new file mode 100644 index 00000000..018fcab2 --- /dev/null +++ b/src/core/operator/controllers/workspace_neo4j.go @@ -0,0 +1,67 @@ +package controllers + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "datacoves.com/operator/api/v1" +) + +func (r *WorkspaceReconciler) neo4j(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + ns := workspace.Namespace + releaseName := workspace.Name + "-neo4j" + gotRelease := HelmRelease{} + err := r.Get(ctx, client.ObjectKey{Namespace: ns, Name: releaseName}, &gotRelease) + if err != nil && !errors.IsNotFound(err) { + return err + } + releaseNotFound := errors.IsNotFound(err) + + if !workspace.InternalServiceEnabled("neo4j") { + if releaseNotFound { + return nil + } + log.Info("delete HelmRelease", "name", releaseName) + return r.Delete(ctx, &gotRelease) + } + + release := &HelmRelease{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: releaseName, + }, + Spec: HelmReleaseSpec{ + RepoURL: workspace.Spec.Charts["neo4j"]["repo"], + RepoName: workspace.Spec.Charts["neo4j"]["repo_name"], + Chart: workspace.Spec.Charts["neo4j"]["chart"], + Version: workspace.Spec.Charts["neo4j"]["version"], + ValuesName: workspace.Spec.Configs["neo4j-values"], + }, + } + + // If the release is already in the state we want it, do nothing. + if gotRelease.Spec.ValuesName == release.Spec.ValuesName { + return nil + } + + // Set the owner of the deployment, so that it is deleted when the owner is deleted. + err = ctrlu.SetControllerReference(workspace, release, r.Scheme) + if err != nil { + return err + } + if releaseNotFound { + log.Info("create HelmRelease", "name", releaseName) + return r.Create(ctx, release) + } else { + log.Info("update HelmRelease", "name", releaseName) + release.ResourceVersion = gotRelease.ResourceVersion + return r.Update(ctx, release) + } +} diff --git a/src/core/operator/controllers/workspace_network_policies.go b/src/core/operator/controllers/workspace_network_policies.go new file mode 100644 index 00000000..0403ef03 --- /dev/null +++ b/src/core/operator/controllers/workspace_network_policies.go @@ -0,0 +1,783 @@ +package controllers + +import ( + "context" + "strings" + + . 
"datacoves.com/operator/api/v1" + core "k8s.io/api/core/v1" + networkingv1 "k8s.io/api/networking/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +func (r *WorkspaceReconciler) networkPolicies(ctx context.Context, workspace *Workspace) (err error) { + log := log.FromContext(ctx) + + err = r.networkPoliciesDenyAll(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for Deny All") + return + } + + err = r.networkPoliciesAllowDNS(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for Allow DNS") + return + } + + err = r.networkPoliciesAllowInternet(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for Allow Internet") + return + } + + err = r.networkPoliciesWorkspaceIsolation(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for Namespace Isolation") + return + } + + err = r.networkPoliciesOpenPomerium(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for OpenPomerium") + return + } + + err = r.networkPolicyCodeServerToCoreApi(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for CodeServerToCoreApi") + return + } + + err = r.networkPolicyCodeServerToDbtApi(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for CodeServerToDbtApi") + return + } + + err = r.networkPolicyAirflowToCoreApi(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for AirflowToCoreApi") + return + } + + err = r.networkPolicyAirflowToDbtApi(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for AirflowToDbtApi") + return + } + + err = r.networkPolicyAllowIngressController(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies to access Ingress Controller") + return + } + + err = r.networkPoliciesAllowK8sApiServer(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies to access Ingress Controller") + return + } + + err = r.networkPolicyAirflowLogsToLokiGateway(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for Airflow Logs") + return + } + + err = r.networkPolicyDbtDocsToCoreApi(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for DBT Docs") + return + } + + err = r.networkPolicyAllowCrossEnvToSameProject(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for environmest in the same project") + return + } + + // This is only for local environment + if strings.Contains(workspace.Spec.ClusterDomain, "datacoveslocal.com") { + err = r.networkPolicyAllowLocalPostgres(ctx, workspace) + if err != nil { + log.Error(err, "error in stage: Network Policies for local Postgres") + return + } + } + + return +} + +func ptrProtocol(p core.Protocol) *core.Protocol { + return &p +} + +func (r *WorkspaceReconciler) networkPoliciesDenyAll(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyDenyAll(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyDenyAll(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "deny-all", + 
Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{}, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPoliciesAllowDNS(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyAllowDNS(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyAllowDNS(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "allow-dns", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{}, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + Ports: []networkingv1.NetworkPolicyPort{ + { + Protocol: ptrProtocol("UDP"), + Port: &intstr.IntOrString{ + Type: intstr.Type(0), + IntVal: 53, + }, + }, + }, + To: []networkingv1.NetworkPolicyPeer{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "0.0.0.0/0", + }, + }, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPoliciesAllowInternet(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyAllowInternet(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyAllowInternet(workspace *Workspace) *networkingv1.NetworkPolicy { + EgressRules := []networkingv1.NetworkPolicyPeer{ + { + IPBlock: &networkingv1.IPBlock{ + CIDR: "0.0.0.0/0", + Except: []string{ + "10.0.0.0/8", + "192.168.0.0/16", + "172.16.0.0/20", + }, + }, + }, + } + if workspace.Spec.InternalDnsIp != "" { + EgressRules = append(EgressRules, networkingv1.NetworkPolicyPeer{ + IPBlock: &networkingv1.IPBlock{ + CIDR: workspace.Spec.InternalDnsIp + "/32", + }, + }) + } + if workspace.Spec.InternalDbClusterIpRange != "" { + EgressRules = append(EgressRules, networkingv1.NetworkPolicyPeer{ + IPBlock: &networkingv1.IPBlock{ + CIDR: workspace.Spec.InternalDbClusterIpRange, + }, + }) + } + + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "allow-internet", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{}, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: EgressRules, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPoliciesWorkspaceIsolation(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyWorkspaceIsolation(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyWorkspaceIsolation(workspace *Workspace) *networkingv1.NetworkPolicy { + EgressRules := []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s.datacoves.com/workspace": workspace.Name, + }, + }, + }, + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s.datacoves.com/account": workspace.Spec.Account, + }, + }, + }, + } + if workspace.Spec.InternalDnsIp != "" { + EgressRules = append(EgressRules, 
networkingv1.NetworkPolicyPeer{ + IPBlock: &networkingv1.IPBlock{ + CIDR: workspace.Spec.InternalDnsIp + "/32", + }, + }) + } + if workspace.Spec.InternalDbClusterIpRange != "" { + EgressRules = append(EgressRules, networkingv1.NetworkPolicyPeer{ + IPBlock: &networkingv1.IPBlock{ + CIDR: workspace.Spec.InternalDbClusterIpRange, + }, + }) + } + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "workspace-isolation", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{}, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: EgressRules, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPoliciesOpenPomerium(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyOpenPomerium(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyOpenPomerium(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "open-pomerium", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "pomerium", + }, + }, + Ingress: []networkingv1.NetworkPolicyIngressRule{ + {}, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + {}, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Ingress"), + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPolicyCodeServerToCoreApi(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyCodeServerToCoreApi(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyCodeServerToCoreApi(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "code-server-to-core-api", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{ + MatchLabels: map[string]string{ + "role": "code-server", + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s.datacoves.com/namespace": "core", + }, + }, + PodSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "api", + }, + }, + }, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPolicyCodeServerToDbtApi(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyCodeServerToDbtApi(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyCodeServerToDbtApi(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "allow-code-server-to-dbt-api", + 
Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{ + MatchLabels: map[string]string{ + "role": "code-server", + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s.datacoves.com/namespace": "core", + }, + }, + PodSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "dbt-api", + }, + }, + }, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPolicyAirflowToCoreApi(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyAirflowToCoreApi(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyAirflowToCoreApi(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "airflow-to-core-api", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{ + MatchLabels: map[string]string{ + "tier": "airflow", + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s.datacoves.com/namespace": "core", + }, + }, + PodSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "api", + }, + }, + }, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPolicyDbtDocsToCoreApi(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyDbtDocsToCoreApi(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyDbtDocsToCoreApi(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "dbt-docs-to-core-api", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "dbt-docs", + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s.datacoves.com/namespace": "core", + }, + }, + PodSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "api", + }, + }, + }, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPolicyAirflowToDbtApi(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyAirflowToDbtApi(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyAirflowToDbtApi(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "allow-airflow-to-dbt-api", + Namespace: workspace.GetNamespace(), + }, + Spec: 
networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{ + MatchLabels: map[string]string{ + // for more granularity: use the "component" label. + "tier": "airflow", + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s.datacoves.com/namespace": "core", + }, + }, + PodSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "app": "dbt-api", + }, + }, + }, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPolicyAllowIngressController(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyAllowIngressController(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyAllowIngressController(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "allow-ingress-controller", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/name": "ingress-nginx", + }, + }, + }, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPoliciesAllowK8sApiServer(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyAllowK8sApiServer(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyAllowK8sApiServer(workspace *Workspace) *networkingv1.NetworkPolicy { + var ips []networkingv1.NetworkPolicyPeer + var ports []networkingv1.NetworkPolicyPort + + for _, ip := range workspace.Spec.ClusterApiServerIps.Ips { + ips = append(ips, networkingv1.NetworkPolicyPeer{ + IPBlock: &networkingv1.IPBlock{ + CIDR: ip + "/32", + }, + }) + } + + for _, port := range workspace.Spec.ClusterApiServerIps.Ports { + ports = append(ports, networkingv1.NetworkPolicyPort{ + Protocol: ptrProtocol("TCP"), + Port: &intstr.IntOrString{ + Type: intstr.Type(0), + IntVal: port, + }, + }) + } + + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "allow-k8s-apiserver", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{}, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: ips, + Ports: ports, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPolicyAirflowLogsToLokiGateway(ctx context.Context, workspace *Workspace) error { + networkPolicy := genNetworkPolicyAirflowLogsToLokiGateway(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func genNetworkPolicyAirflowLogsToLokiGateway(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: 
"allow-airflow-logs-to-loki-gateway", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{ + MatchLabels: map[string]string{ + // "k8s.datacoves.com/app": "airflow-promtail", + "tier": "airflow", + }, + }, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s.datacoves.com/namespace": "prometheus", + }, + }, + PodSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "app.kubernetes.io/component": "gateway", + "app.kubernetes.io/instance": "loki", + }, + }, + }, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +// This is only for local environment +func (r *WorkspaceReconciler) networkPolicyAllowLocalPostgres(ctx context.Context, workspace *Workspace) error { + networkPolicy := getNetworkPolicyAllowLocalPostgres(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +// This is only for local environment +func getNetworkPolicyAllowLocalPostgres(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "allow-postgres", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{}, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s.datacoves.com/namespace": "core", + }, + }, + PodSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "datacoves.com/adapter": "core", + "app.kubernetes.io/name": "postgresql", + }, + }, + }, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} + +func (r *WorkspaceReconciler) networkPolicyAllowCrossEnvToSameProject(ctx context.Context, workspace *Workspace) error { + networkPolicy := getNetworkPolicyAllowCrossEnvToSameProject(workspace) + return reconcileNetworkPolicy(ctx, r.Client, r.Scheme, workspace, networkPolicy) +} + +func getNetworkPolicyAllowCrossEnvToSameProject(workspace *Workspace) *networkingv1.NetworkPolicy { + return &networkingv1.NetworkPolicy{ + TypeMeta: v1.TypeMeta{ + Kind: "NetworkPolicy", + APIVersion: "networking.k8s.io/v1", + }, + ObjectMeta: v1.ObjectMeta{ + Name: "allow-cross-environment-to-same-project", + Namespace: workspace.GetNamespace(), + }, + Spec: networkingv1.NetworkPolicySpec{ + PodSelector: v1.LabelSelector{}, + Egress: []networkingv1.NetworkPolicyEgressRule{ + { + To: []networkingv1.NetworkPolicyPeer{ + { + NamespaceSelector: &v1.LabelSelector{ + MatchLabels: map[string]string{ + "k8s.datacoves.com/project": workspace.Spec.Project, + }, + }, + PodSelector: &v1.LabelSelector{}, + }, + }, + }, + }, + PolicyTypes: []networkingv1.PolicyType{ + networkingv1.PolicyType("Egress"), + }, + }, + } +} diff --git a/src/core/operator/controllers/workspace_pomerium.go b/src/core/operator/controllers/workspace_pomerium.go new file mode 100644 index 00000000..7eea4a3e --- /dev/null +++ b/src/core/operator/controllers/workspace_pomerium.go @@ -0,0 +1,464 @@ +package controllers + +import ( + "context" + "fmt" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + + apps "k8s.io/api/apps/v1" + 
autoscaling "k8s.io/api/autoscaling/v2" + core "k8s.io/api/core/v1" + + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/yaml" + + . "datacoves.com/operator/api/v1" + u "datacoves.com/operator/controllers/utils" +) + +const ( + promeriumDeploymentName = "pomerium" + pomeriumRedisName = "pomerium-redis" + pomeriumRedisPort = 6379 +) + +type PomeriumPolicy struct { + From string `json:"from"` + To string `json:"to"` + + AllowedUsers []string `json:"allowed_users,omitempty"` + AllowedIdpClaims map[string][]string `json:"allowed_idp_claims,omitempty"` + AllowWebsockets bool `json:"allow_websockets,omitempty"` + SetResponseHeaders map[string]string `json:"set_response_headers,omitempty"` + SetRequestHeaders map[string]string `json:"set_request_headers,omitempty"` + Timeout string `json:"timeout,omitempty"` + AllowAnyAuthenticatedUser bool `json:"allow_any_authenticated_user,omitempty"` + AllowPublicUnauthenticatedAccess bool `json:"allow_public_unauthenticated_access,omitempty"` + Prefix string `json:"prefix,omitempty"` + PreserveHostHeader bool `json:"host_rewrite_header,omitempty"` +} + +// pomerium reconciles the namespace's resources required to run pomerium. +func (r *WorkspaceReconciler) pomerium(ctx context.Context, workspace *Workspace, services []Service) error { + log := log.FromContext(ctx) + + base := map[string]interface{}{} + baseSecret := core.Secret{} + baseSecretKey := client.ObjectKey{ + Namespace: workspace.Namespace, + Name: workspace.Spec.Configs["pomerium-base-config"], + } + err := r.Get(ctx, baseSecretKey, &baseSecret) + if err != nil { + return err + } + err = yaml.Unmarshal(baseSecret.Data["config.yaml"], &base) + if err != nil { + return err + } + + secret := genPomeriumConfigSecret(workspace, base, services) + + err = reconcileSecret(ctx, r.Client, r.Scheme, workspace, secret) + if err != nil { + return err + } + + deploymentPomeriun := genPomeriumDeployment(workspace, secret.Name, services) + err = reconcileDeployment(ctx, r.Client, r.Scheme, workspace, deploymentPomeriun) + if err != nil { + // Way to apply changes to immutable fields + log.Error(err, "deployment could not be created or updated") + log.Info("trying to recreate deployment:", "deplopyment", promeriumDeploymentName) + err := deleteDeployment(ctx, r.Client, workspace.Namespace, promeriumDeploymentName) + if err != nil { + return err + } + err = reconcileDeployment(ctx, r.Client, r.Scheme, workspace, deploymentPomeriun) + if err != nil { + return err + } + } + + err = reconcileService(ctx, r.Client, r.Scheme, workspace, genPomeriumRedisService(workspace)) + if err != nil { + return err + } + + err = reconcileDeployment(ctx, r.Client, r.Scheme, workspace, genPomeriumRedisDeployment(workspace)) + if err != nil { + return err + } + + if workspace.HPA() { + err = reconcileHPA(ctx, r.Client, r.Scheme, workspace, genPomeriumHPA(workspace, services)) + if err != nil { + return err + } + } + + return nil +} + +func genPomeriumDeployment(workspace *Workspace, configSecretName string, services []Service) *apps.Deployment { + labels := map[string]string{ + "app": "pomerium", + "datacoves.com/adapter": "pomerium", + } + + meta := v1.ObjectMeta{ + Name: promeriumDeploymentName, + Namespace: workspace.Namespace, + Labels: labels, + } + + authServiceUrl := "https://authenticate-" + workspace.Name + "." 
+ workspace.Spec.ClusterDomain + + configVolumeName := "pomerium-config-volume" + + // Assuming that pomerium inits 4 services per second + delay := len(services) / 4 + // incresing probes every 10 seconds, equivalent to 40 new services, or 20 new users + livenessDelay := 30 + delay - (delay % 10) + readinessDelay := 10 + delay - (delay % 10) + + pomeriumContainer := core.Container{ + Name: "pomerium", + Image: workspace.ImageName("pomerium/pomerium"), + ImagePullPolicy: core.PullIfNotPresent, + Args: []string{"-config", "/config/config.yaml"}, + Ports: []core.ContainerPort{{ContainerPort: 80, Protocol: core.ProtocolTCP, Name: "http"}}, + VolumeMounts: []core.VolumeMount{ + { + Name: configVolumeName, + MountPath: "/config", + ReadOnly: true, + }, + }, + LivenessProbe: &core.Probe{ + ProbeHandler: core.ProbeHandler{ + HTTPGet: &core.HTTPGetAction{ + Path: "/ping", + Port: intstr.FromInt(80), + }, + }, + // As the number of users grows, the pomerium config size grows and + // pomerium takes longer to start. It is killed by the liveness probe + // if it doesn't start fast enough. + InitialDelaySeconds: int32(livenessDelay), + PeriodSeconds: 30, + }, + ReadinessProbe: &core.Probe{ + ProbeHandler: core.ProbeHandler{ + HTTPGet: &core.HTTPGetAction{ + Path: "/ping", + Port: intstr.FromInt(80), + }, + }, + InitialDelaySeconds: int32(readinessDelay), + PeriodSeconds: 30, + }, + // https://deepsource.io/blog/zero-downtime-deployment/#towards-zero-downtime + // The command doesn't matter, what we want is to delay the termination + // of the pod for a little while so that it is alive while the ingress + // reroutes traffic to new pods. Since the pomerium container only has + // the pomerium binary, we run `pomerium -version` instead of `sleep x`. + Lifecycle: &core.Lifecycle{ + PreStop: &core.LifecycleHandler{ + Exec: &core.ExecAction{ + Command: []string{"pomerium", "-version"}, + }, + }, + }, + Env: u.Env{}. + Set("ADDRESS", ":80"). + Set("INSECURE_SERVER", "true"). + Set("AUTHENTICATE_SERVICE_URL", authServiceUrl). + Set("AUTOCERT", "false"), + } + + if resReqs, ok := workspace.Spec.ResourceRequirements["pomerium"]; ok { + pomeriumContainer.Resources = resReqs + } + + containers := []core.Container{ + pomeriumContainer, + } + + volumes := []core.Volume{ + { + Name: configVolumeName, + VolumeSource: core.VolumeSource{ + Secret: &core.SecretVolumeSource{ + SecretName: configSecretName, + Items: []core.KeyToPath{ + {Key: "config.yaml", Path: "config.yaml"}, + }, + DefaultMode: &u.Int32_0o644, + }, + }, + }, + } + + // With maxUnavaliable = 0 and maxSurge > 1, existing pods aren't terminated + // before new ones start. + maxUnavailable := intstr.FromInt(0) + maxSurge := intstr.FromInt(3) + minReplicas := int32(1 + len(services)/120) + if minReplicas > 5 { + minReplicas = 5 + } + + deployment := &apps.Deployment{ + ObjectMeta: meta, + Spec: apps.DeploymentSpec{ + Selector: &v1.LabelSelector{MatchLabels: labels}, + Replicas: &minReplicas, + Template: core.PodTemplateSpec{ + ObjectMeta: meta, + Spec: core.PodSpec{ + NodeSelector: u.GeneralNodeSelector, + Containers: containers, + Volumes: volumes, + HostAliases: []core.HostAlias{ + { + IP: workspace.Spec.InternalIp, + Hostnames: []string{ + "api." 
+ workspace.Spec.ClusterDomain, + }, + }, + }, + }, + }, + MinReadySeconds: int32(1), + Strategy: apps.DeploymentStrategy{ + Type: apps.RollingUpdateDeploymentStrategyType, + RollingUpdate: &apps.RollingUpdateDeployment{ + MaxUnavailable: &maxUnavailable, + MaxSurge: &maxSurge, + }, + }, + }, + } + + return deployment +} + +func genPomeriumConfigSecret(workspace *Workspace, base map[string]interface{}, services []Service) *core.Secret { + var pomeriumConfigLabels = map[string]string{"app": "pomerium", "datacoves.com/adapter": "pomerium"} + + // Copy the base config to config (shallow). + config := make(map[string]interface{}, len(base)) + for k, v := range base { + config[k] = v + } + + policies := []PomeriumPolicy{} + for _, service := range services { + if service.Name == "pomerium" || !workspace.ServiceEnabled(service.Kind) { + continue + } + to := "http://" + service.Name + if service.Port != 0 { + to += fmt.Sprintf(":%d", service.Port) + } + + policy := PomeriumPolicy{ + From: "https://" + service.Host, + To: to, + SetResponseHeaders: map[string]string{ + // This header allows the service to be embedded in an iframe + // from the workbench domain. + "Content-Security-Policy": "frame-ancestors 'self' " + workspace.WorkbenchDomain() + ";", + }, + SetRequestHeaders: map[string]string{ + "X-Forwarded-Proto": "https", + }, + AllowWebsockets: service.Websockets, + PreserveHostHeader: service.PreserveHostHeader, + } + + if service.PathPrefix != "" { + policy.Prefix = service.PathPrefix + } + + if service.Kind == "airbyte" { + policy.Timeout = "300s" + } + + if service.AllowPublicUnauthenticatedAccess { + // public access + policy.AllowPublicUnauthenticatedAccess = true + } else if service.AllowAnyAuthenticatedUser { + // authenticated users + policy.AllowAnyAuthenticatedUser = true + } else { + // specific authenticated users + users := []string{} + if service.User == nil { + for _, user := range workspace.Spec.Users { + for _, permission := range user.Permissions { + if permission.Service == service.DomainPrefix && permission.Path == service.PathPrefix { + users = append(users, user.Email) + } + } + } + policy.AllowedIdpClaims = map[string][]string{ + workspace.Spec.OidcUserId: users, + } + } else { + policy.AllowedIdpClaims = map[string][]string{ + workspace.Spec.OidcUserId: {service.User.Email}, + } + } + } + + policies = append(policies, policy) + } + config["policy"] = policies + + data, err := yaml.Marshal(config) + if err != nil { + panic(err) + } + + h := u.HashForName(data) + + return &core.Secret{ + ObjectMeta: v1.ObjectMeta{ + Name: "pomerium-config-" + h, + Namespace: workspace.Namespace, + Labels: pomeriumConfigLabels, + }, + Type: core.SecretTypeOpaque, + Data: map[string][]byte{"config.yaml": data}, + } +} + +func genPomeriumRedisDeployment(workspace *Workspace) *apps.Deployment { + labels := map[string]string{ + "app": pomeriumRedisName, + } + + meta := v1.ObjectMeta{ + Name: pomeriumRedisName, + Namespace: workspace.Namespace, + Labels: labels, + } + + usersVolumeName := "pomerium-redis-users" + + redisContainer := core.Container{ + Name: "redis", + Image: workspace.ImageName("datacovesprivate/pomerium-redis"), + ImagePullPolicy: core.PullIfNotPresent, + Ports: []core.ContainerPort{{ContainerPort: 6379, Protocol: core.ProtocolTCP}}, + VolumeMounts: []core.VolumeMount{ + { + Name: usersVolumeName, + MountPath: "/etc/redis/users.acl", + SubPath: "users.acl", + ReadOnly: true, + }, + }, + } + + if resReqs, ok := workspace.Spec.ResourceRequirements[pomeriumRedisName]; ok { + 
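+ // Per-workspace requests and limits for pomerium-redis come straight from the Workspace
+ // spec as a core.ResourceRequirements value; illustrative values only, e.g.:
+ //   requests: {cpu: 50m, memory: 64Mi}
+ //   limits:   {cpu: 200m, memory: 128Mi}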
redisContainer.Resources = resReqs + } + + containers := []core.Container{ + redisContainer, + } + + volumes := []core.Volume{ + { + Name: usersVolumeName, + VolumeSource: core.VolumeSource{ + Secret: &core.SecretVolumeSource{ + SecretName: workspace.Spec.Configs["pomerium-redis-users"], + Items: []core.KeyToPath{ + {Key: "users.acl", Path: "users.acl"}, + }, + DefaultMode: &u.Int32_0o644, + }, + }, + }, + } + + deployment := &apps.Deployment{ + ObjectMeta: meta, + Spec: apps.DeploymentSpec{ + Selector: &v1.LabelSelector{MatchLabels: labels}, + Replicas: &u.Int32_1, + Template: core.PodTemplateSpec{ + ObjectMeta: meta, + Spec: core.PodSpec{ + NodeSelector: u.GeneralNodeSelector, + Containers: containers, + Volumes: volumes, + }, + }, + }, + } + + return deployment +} + +func genPomeriumRedisService(workspace *Workspace) *core.Service { + return &core.Service{ + ObjectMeta: v1.ObjectMeta{ + Name: pomeriumRedisName, + Namespace: workspace.Namespace, + }, + Spec: core.ServiceSpec{ + Selector: map[string]string{"app": pomeriumRedisName}, + Ports: []core.ServicePort{ + { + Protocol: core.ProtocolTCP, + Port: int32(pomeriumRedisPort), + TargetPort: intstr.FromInt(pomeriumRedisPort), + }, + }, + }, + } +} + +func genPomeriumHPA(workspace *Workspace, services []Service) *autoscaling.HorizontalPodAutoscaler { + minReplicas := int32(1 + len(services)/120) + if minReplicas > 5 { + minReplicas = 5 + } + CPUUtilizationPercentage := int32(40) + + return &autoscaling.HorizontalPodAutoscaler{ + ObjectMeta: v1.ObjectMeta{ + Name: "pomerium", + Namespace: workspace.Namespace, + }, + Spec: autoscaling.HorizontalPodAutoscalerSpec{ + ScaleTargetRef: autoscaling.CrossVersionObjectReference{ + Kind: "Deployment", + Name: "pomerium", + APIVersion: "apps/v1", + }, + MinReplicas: &minReplicas, + MaxReplicas: 10, + Metrics: []autoscaling.MetricSpec{ + { + Type: "Resource", + Resource: &autoscaling.ResourceMetricSource{ + Name: "cpu", + Target: autoscaling.MetricTarget{ + Type: "Utilization", + AverageUtilization: &CPUUtilizationPercentage, + }, + }, + }, + }, + }, + } +} diff --git a/src/core/operator/controllers/workspace_postgresql.go b/src/core/operator/controllers/workspace_postgresql.go new file mode 100644 index 00000000..25e3b44e --- /dev/null +++ b/src/core/operator/controllers/workspace_postgresql.go @@ -0,0 +1,67 @@ +package controllers + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . 
"datacoves.com/operator/api/v1" +) + +func (r *WorkspaceReconciler) postgresql(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + ns := workspace.Namespace + releaseName := workspace.Name + "-postgresql" + gotRelease := HelmRelease{} + err := r.Get(ctx, client.ObjectKey{Namespace: ns, Name: releaseName}, &gotRelease) + if err != nil && !errors.IsNotFound(err) { + return err + } + releaseNotFound := errors.IsNotFound(err) + + if !workspace.InternalServiceEnabled("postgresql") { + if releaseNotFound { + return nil + } + log.Info("delete HelmRelease", "name", releaseName) + return r.Delete(ctx, &gotRelease) + } + + release := &HelmRelease{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: releaseName, + }, + Spec: HelmReleaseSpec{ + RepoURL: workspace.Spec.Charts["postgresql"]["repo"], + RepoName: workspace.Spec.Charts["postgresql"]["repo_name"], + Chart: workspace.Spec.Charts["postgresql"]["chart"], + Version: workspace.Spec.Charts["postgresql"]["version"], + ValuesName: workspace.Spec.Configs["postgresql-values"], + }, + } + + // If the release is already in the state we want it, do nothing. + if gotRelease.Spec.ValuesName == release.Spec.ValuesName { + return nil + } + + // Set the owner of the deployment, so that it is deleted when the owner is deleted. + err = ctrlu.SetControllerReference(workspace, release, r.Scheme) + if err != nil { + return err + } + if releaseNotFound { + log.Info("create HelmRelease", "name", releaseName) + return r.Create(ctx, release) + } else { + log.Info("update HelmRelease", "name", releaseName) + release.ResourceVersion = gotRelease.ResourceVersion + return r.Update(ctx, release) + } +} diff --git a/src/core/operator/controllers/workspace_superset.go b/src/core/operator/controllers/workspace_superset.go new file mode 100644 index 00000000..c63743ba --- /dev/null +++ b/src/core/operator/controllers/workspace_superset.go @@ -0,0 +1,67 @@ +package controllers + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + ctrlu "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + + "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + . "datacoves.com/operator/api/v1" +) + +func (r *WorkspaceReconciler) superset(ctx context.Context, workspace *Workspace) error { + log := log.FromContext(ctx) + ns := workspace.Namespace + releaseName := workspace.Name + "-superset" + gotRelease := HelmRelease{} + err := r.Get(ctx, client.ObjectKey{Namespace: ns, Name: releaseName}, &gotRelease) + if err != nil && !errors.IsNotFound(err) { + return err + } + releaseNotFound := errors.IsNotFound(err) + + if !workspace.ServiceEnabled("superset") { + if releaseNotFound { + return nil + } + log.Info("delete HelmRelease", "name", releaseName) + return r.Delete(ctx, &gotRelease) + } + + release := &HelmRelease{ + ObjectMeta: v1.ObjectMeta{ + Namespace: ns, + Name: releaseName, + }, + Spec: HelmReleaseSpec{ + RepoURL: workspace.Spec.Charts["superset"]["repo"], + RepoName: workspace.Spec.Charts["superset"]["repo_name"], + Chart: workspace.Spec.Charts["superset"]["chart"], + Version: workspace.Spec.Charts["superset"]["version"], + ValuesName: workspace.Spec.Configs["superset-values"], + }, + } + + // If the release is already in the state we want it, do nothing. + if gotRelease.Spec.ValuesName == release.Spec.ValuesName { + return nil + } + + // Set the owner of the deployment, so that it is deleted when the owner is deleted. 
+ err = ctrlu.SetControllerReference(workspace, release, r.Scheme) + if err != nil { + return err + } + if releaseNotFound { + log.Info("create HelmRelease", "name", releaseName) + return r.Create(ctx, release) + } else { + log.Info("update HelmRelease", "name", releaseName) + release.ResourceVersion = gotRelease.ResourceVersion + return r.Update(ctx, release) + } +} diff --git a/src/core/operator/go.mod b/src/core/operator/go.mod new file mode 100644 index 00000000..0b08948b --- /dev/null +++ b/src/core/operator/go.mod @@ -0,0 +1,78 @@ +module datacoves.com/operator + +go 1.17 + +require ( + github.com/TheZeroSlave/zapsentry v1.14.0 + github.com/getsentry/sentry-go v0.18.0 + github.com/go-logr/logr v1.2.0 + github.com/onsi/ginkgo v1.16.5 + github.com/onsi/gomega v1.17.0 + go.uber.org/zap v1.24.0 + helm.sh/helm/v3 v3.8.0 + k8s.io/api v0.23.1 + k8s.io/apimachinery v0.23.1 + k8s.io/client-go v0.23.1 + sigs.k8s.io/controller-runtime v0.11.0 + sigs.k8s.io/yaml v1.3.0 +) + +require ( + cloud.google.com/go v0.99.0 // indirect + github.com/Azure/go-autorest v14.2.0+incompatible // indirect + github.com/Azure/go-autorest/autorest v0.11.20 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.15 // indirect + github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect + github.com/Azure/go-autorest/logger v0.2.1 // indirect + github.com/Azure/go-autorest/tracing v0.6.0 // indirect + github.com/Masterminds/semver/v3 v3.1.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/evanphx/json-patch v4.12.0+incompatible // indirect + github.com/fsnotify/fsnotify v1.5.1 // indirect + github.com/go-logr/zapr v1.2.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-jwt/jwt/v4 v4.0.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/go-cmp v0.5.9 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/googleapis/gnostic v0.5.5 // indirect + github.com/imdario/mergo v0.3.12 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/nxadm/tail v1.4.8 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/prometheus/client_golang v1.11.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.28.0 // indirect + github.com/prometheus/procfs v0.6.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be // indirect + golang.org/x/net v0.0.0-20221002022538-bcab6841153b // indirect + golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect + golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec // indirect + golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect + golang.org/x/text v0.3.7 // indirect + golang.org/x/time v0.0.0-20220922220347-f3bd1da661af // indirect + gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/protobuf v1.28.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + 
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.23.1 // indirect + k8s.io/component-base v0.23.1 // indirect + k8s.io/klog/v2 v2.30.0 // indirect + k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect + k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect + sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect +) diff --git a/src/core/operator/go.sum b/src/core/operator/go.sum new file mode 100644 index 00000000..93b2d9df --- /dev/null +++ b/src/core/operator/go.sum @@ -0,0 +1,1688 @@ +bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM= +cloud.google.com/go v0.99.0 h1:y/cM2iqGgGi5D5DQZl6D9STN/3dR/Vx5Mp8s752oJTY= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod 
h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= +github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw= +github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= +github.com/Azure/go-autorest/autorest v0.11.20 h1:s8H1PbCZSqg/DH7JMlOz6YMig6htWLNPsjDdlLqCx3M= +github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY= +github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= +github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= +github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= +github.com/Azure/go-autorest/autorest/adal v0.9.15 h1:X+p2GF0GWyOiSmqohIaEeuNFNDY4I4EOlVuUQvFdWMk= +github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A= +github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= 
+github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= +github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/mocks v0.4.1 h1:K0laFcLE6VLTOwNgSxaGbUcLPuGXlNkbVvq4cW4nIHk= +github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= +github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= +github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= +github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= +github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= +github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= +github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= +github.com/Masterminds/squirrel v1.5.2/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= +github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= +github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= 
+github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= +github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= +github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg= +github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= +github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= +github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM= +github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= +github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ= +github.com/TheZeroSlave/zapsentry v1.14.0 h1:z6H0lIyKVpdXsP5K8J11b4/+V+QCDLDslZmtXsnduMk= +github.com/TheZeroSlave/zapsentry v1.14.0/go.mod h1:0hUR1Et5tSroEYhWOZcxE2iBqcFkuG7jGGlUmOA7XAs= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.10/go.mod 
h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= +github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk= +github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= +github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8= +github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50= +github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= +github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw= +github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg= +github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc= +github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= +github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= +github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= +github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= +github.com/containerd/aufs 
v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= +github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= +github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E= +github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss= +github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI= +github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= +github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM= +github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= +github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= +github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.2/go.mod h1:qpbpJ1jmlqsR9f2IyaLPsdkCdnt0rbDVqIDlhuu5tRY= +github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= +github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= +github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.1-0.20191213020239-082f7e3aed57/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= +github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= +github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= +github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= +github.com/containerd/containerd v1.5.1/go.mod 
h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= +github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= +github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo= +github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y= +github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ= +github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM= +github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0= +github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4= +github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU= +github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk= +github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= +github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g= +github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok= +github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0= +github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA= +github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow= +github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms= +github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c= +github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY= +github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM= +github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc 
v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= +github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= +github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= +github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= +github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= +github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= +github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s= +github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw= +github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y= +github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY= +github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY= +github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM= +github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8= +github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc= +github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4= +github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.1.0/go.mod 
h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= +github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ= +github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s= +github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8= +github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I= +github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE= +github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/distribution/distribution/v3 v3.0.0-20211118083504-a29a3c99a684/go.mod h1:UfCu3YXJJCI+IdnqGgYP82dk2+Joxmv+mUTVBES6wac= +github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= +github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v20.10.11+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= +github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= 
+github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= +github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= +github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= +github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.11.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= +github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= +github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= +github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= +github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= +github.com/getsentry/sentry-go v0.18.0 h1:MtBW5H9QgdcJabtZcuJG80BMOwaBpkRDZkxRkNC1sN0= +github.com/getsentry/sentry-go v0.18.0/go.mod h1:Kgon4Mby+FJ7ZWHFUAZgVaIa8sxHtnRJRLTXZr51aKQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= 
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/zapr v1.2.0 h1:n4JnPI1T3Qq1SFEi/F8rwLrZERp2bso19PJZDB9dayk= +github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gobuffalo/logger v1.0.3/go.mod h1:SoeejUwldiS7ZsyCBphOGURmWdwUFXs0J7TCjEhjKxM= +github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fdec9hrhI= +github.com/gobuffalo/packr/v2 v2.8.1/go.mod h1:c/PLlOuTU+p3SybaJATW3H6lX/iK7xEz5OeMf+NnJpg= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= +github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU= +github.com/gogo/googleapis v1.4.0/go.mod 
h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.0.0 h1:RAqyYixv1p7uEnocuy8P1nru5wprCh/MH2BIlW5z5/o= +github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= 
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= +github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= +github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w= +github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod 
h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= +github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= 
+github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= 
+github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= +github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= +github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= 
+github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ= +github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/karrick/godirwalk v1.15.8/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod 
h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= +github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= +github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc= +github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= +github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= +github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= +github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= 
+github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= +github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= +github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= +github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= +github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= +github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= 
+github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= +github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= +github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo 
v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.17.0 h1:9Luw4uT5HTjHTN8+aNcSThgH1vdXnmdJ8xIfZ4wyTRE= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= +github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0= +github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= +github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= 
+github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= +github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs= +github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE= +github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo= +github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang 
v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw= +github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rubenv/sql-migrate 
v0.0.0-20210614095031-55d5740dbbcc/go.mod h1:HFLT6i9iR4QBOF5rdCyjddC9t59ArqWJV2xx+jwcCMo= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.3/go.mod 
h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= +github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= +github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM= +github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= +github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= +github.com/tmc/grpc-websocket-proxy 
v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE= +github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU= +github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0= +github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= +github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod 
h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= +go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= +go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= +go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= +go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= +go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4= +go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= +go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= +go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= +go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= +go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= +go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= +go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= +go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= +go.uber.org/atomic v1.3.2/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= +golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto 
v0.0.0-20220926161630-eccd6366d1be h1:fmw3UbQh+nxngCAHrDCCztao/kbYFnWjoqop8dHx05A= +golang.org/x/crypto v0.0.0-20220926161630-eccd6366d1be/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20221002022538-bcab6841153b h1:6e93nYa3hNqAvLr0pD4PN1fFS+gKzp2zAXqrnTCstqU= 
+golang.org/x/net v0.0.0-20221002022538-bcab6841153b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190522044717-8097e1b27ff5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys 
v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec h1:BkDtF2Ih9xZ7le9ndzTA7KJow28VbQW3odyk/8drmuI= +golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= 
+golang.org/x/time v0.0.0-20220922220347-f3bd1da661af h1:Yx9k8YCG3dvF87UAn2tu2HQLf2dt/eR1bXxpLMWeH+Y= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod 
h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= +gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= +google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= 
+google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190522204451-c2c4e71fbf69/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod 
h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= +google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 
h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 
v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +helm.sh/helm/v3 v3.8.0 h1:vlQQDDQkrH4NECOFbGcwjjKyHL5Sa3xNLjMxXm7fMVo= +helm.sh/helm/v3 v3.8.0/go.mod h1:0nYPSuvuj8TTJDLRSAfbzGGbazPZsayaDpP8s9FfZT8= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo= +k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ= +k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= +k8s.io/api v0.23.1 h1:ncu/qfBfUoClqwkTGbeRqqOqBCRoUAflMuOaOD7J0c8= +k8s.io/api v0.23.1/go.mod h1:WfXnOnwSqNtG62Y1CdjoMxh7r7u9QXGCkA1u0na2jgo= +k8s.io/apiextensions-apiserver v0.23.1 h1:xxE0q1vLOVZiWORu1KwNRQFsGWtImueOrqSl13sS5EU= +k8s.io/apiextensions-apiserver v0.23.1/go.mod h1:0qz4fPaHHsVhRApbtk3MGXNn2Q9M/cVWWhfHdY2SxiM= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= +k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc= +k8s.io/apimachinery v0.23.1 h1:sfBjlDFwj2onG0Ijx5C+SrAoeUscPrmghm7wHP+uXlo= +k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= +k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= +k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM= +k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= +k8s.io/apiserver v0.23.1/go.mod h1:Bqt0gWbeM2NefS8CjWswwd2VNAKN6lUKR85Ft4gippY= +k8s.io/cli-runtime v0.23.1/go.mod h1:r9r8H/qfXo9w+69vwUL7LokKlLRKW5D6A8vUKCx+YL0= +k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= +k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k= +k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= +k8s.io/client-go v0.23.1 h1:Ma4Fhf/p07Nmj9yAB1H7UwbFHEBrSPg8lviR24U2GiQ= +k8s.io/client-go v0.23.1/go.mod h1:6QSI8fEuqD4zgFK0xbdwfB/PthBsIxCJMa3s17WlcO0= +k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= +k8s.io/code-generator v0.23.1/go.mod h1:V7yn6VNTCWW8GqodYCESVo95fuiEg713S8B7WacWZDA= +k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= +k8s.io/component-base v0.20.4/go.mod 
h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= +k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= +k8s.io/component-base v0.23.1 h1:j/BqdZUWeWKCy2v/jcgnOJAzpRYWSbGcjGVYICko8Uc= +k8s.io/component-base v0.23.1/go.mod h1:6llmap8QtJIXGDd4uIWJhAq0Op8AtQo6bDW2RrNMTeo= +k8s.io/component-helpers v0.23.1/go.mod h1:ZK24U+2oXnBPcas2KolLigVVN9g5zOzaHLkHiQMFGr0= +k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= +k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= +k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= +k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kubectl v0.23.1/go.mod h1:Ui7dJKdUludF8yWAOSN7JZEkOuYixX5yF6E6NjoukKE= +k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/metrics v0.23.1/go.mod h1:qXvsM1KANrc+ZZeFwj6Phvf0NLiC+d3RwcsLcdGc+xs= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= +k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +oras.land/oras-go v1.1.0/go.mod h1:1A7vR/0KknT2UkJVWh+xMi95I/AhK8ZrxrnUSmXN0bQ= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.25/go.mod h1:Mlj9PNLmG9bZ6BHFwFKDo5afkpWyUISkb9Me0GnK66I= +sigs.k8s.io/controller-runtime v0.11.0 h1:DqO+c8mywcZLFJWILq4iktoECTyn30Bkj0CwgqMpZWQ= +sigs.k8s.io/controller-runtime v0.11.0/go.mod 
h1:KKwLiTooNGu+JmLZGn9Sl3Gjmfj66eMbCQznLP5zcqA= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8= +sigs.k8s.io/kustomize/cmd/config v0.10.2/go.mod h1:K2aW7nXJ0AaT+VA/eO0/dzFLxmpFcTzudmAgDwPY1HQ= +sigs.k8s.io/kustomize/kustomize/v4 v4.4.1/go.mod h1:qOKJMMz2mBP+vcS7vK+mNz4HBLjaQSWRY22EF6Tb7Io= +sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.2.0 h1:kDvPBbnPk+qYmkHmSo8vKGp438IASWofnbbUKDE/bv0= +sigs.k8s.io/structured-merge-diff/v4 v4.2.0/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/src/core/operator/hack/boilerplate.go.txt b/src/core/operator/hack/boilerplate.go.txt new file mode 100644 index 00000000..e69de29b diff --git a/src/core/operator/helm/helm_runner.go b/src/core/operator/helm/helm_runner.go new file mode 100644 index 00000000..fe2eca3c --- /dev/null +++ b/src/core/operator/helm/helm_runner.go @@ -0,0 +1,340 @@ +// Package helm runs helm subprocesses to install/upgrade/uninstall helm charts. +// It runs each chart's helm operations run sequentially. +// It schedules the subprocesses to bound total memory usage. +package helm + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "strings" + "syscall" + "time" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + core "k8s.io/api/core/v1" +) + +// RunnerOptions are parameters passed to Start. +type RunnerOptions struct { + // Maximum memory used for all helm subprocess together at any given time. + MaxMemoryBytes int + + // Maximum memory used for a single helm operation (subprocess). + MaxMemoryBytesPerOp int + + // Maximum memory used for a helm operation keyed by chart to set different + // limits for different charts if desired. If a charts is not found in this + // map the limit used will be the default MaxMemoryBytesPerOp. + MaxMemoryBytesPerOpByChart map[string]int + + // Maximum allowed time to run a helm repo update. + RepoUpdateTimeout time.Duration + + // Maximum allowed time to run a helm install or upgrade. + InstallTimeout time.Duration + + // Maximum allowed time to run a helm uninstall. + UninstallTimeout time.Duration +} + +// A Chart value identifies a helm chart from this package's point of view. +// Install/Uninstall operations on the same Chart are serialized, they will +// never run concurrent with each other. They are allowed to run concurrent +// with the operations on other Charts. +type Chart struct { + Namespace string // The namespace to install to. + Name string // The name of the chart, including the repo name. 
+ Release string // The name of the release to install it as. +} + +// An InstallArgs value specifies the parameters to install a chart. +type InstallArgs struct { + RepoURL string + RepoName string + Version string + ValuesName string +} + +// globals +var ( + // Runner options / parameters. + opts RunnerOptions + + // Install/uninstall operations channel. + opChan chan operation + + // Feedback channel for spawned operation goroutines to communicate to the + // run loop that they have finished. + doneChan chan Chart + + // Kubernetes client. + kc client.Client + + // Logger. + log = ctrl.Log.WithName("helm") +) + +// Start starts the runner, the goroutine that coordinates helm operations. Call +// it once at most. Cancelling the context signals starting to shut down. No +// more helm operations are spawned, and the runner waits for running ones. Once +// there are no more running operations it signals completion by closing the +// returned channel. +func Start(ctx context.Context, options RunnerOptions, client client.Client) chan struct{} { + if opChan != nil { + panic("Start called more than once") + } + opts = options + opChan = make(chan operation) + doneChan = make(chan Chart) + kc = client + allDoneChan := make(chan struct{}) + go run(ctx, allDoneChan) + return allDoneChan +} + +// Request that a helm chart be installed. +func Install(chart Chart, args InstallArgs) { + opChan <- operation{opInstall, chart, &args} +} + +// Request that a helm chart be uninstalled. +func Uninstall(chart Chart) { + opChan <- operation{opUninstall, chart, nil} +} + +type opKind byte + +const ( + opInstall opKind = 'i' + opUninstall opKind = 'u' +) + +type operation struct { + kind opKind + chart Chart + args *InstallArgs +} + +func (op *operation) summary() string { + return fmt.Sprintf("%c %s", op.kind, op.chart.Release) +} + +// This state must only accessed by the run loop goroutine. +type state struct { + // One pending operation per chart. If further operations are requested for a + // chart while there is one running the last request overwrites previous ones. + pending map[Chart]*operation + + // Running operations. A single operation is allowed to run at once per chart. + // The value is the memory allocated for use by the operation, in bytes. + running map[Chart]int +} + +func (s *state) memoryInUse() int { + inUseMem := 0 + for _, mem := range s.running { + inUseMem += mem + } + return inUseMem +} + +func (s *state) memoryForOperation(op *operation) int { + if mem, found := opts.MaxMemoryBytesPerOpByChart[op.chart.Name]; found { + return mem + } + return opts.MaxMemoryBytesPerOp +} + +func (s *state) enoughMemoryForOperation(op *operation) bool { + return s.memoryForOperation(op) <= opts.MaxMemoryBytes-s.memoryInUse() +} + +func run(ctx context.Context, allDoneChan chan struct{}) { + s := &state{ + pending: map[Chart]*operation{}, + running: map[Chart]int{}, + } + + for { + select { + // Context cancelled. + case <-ctx.Done(): + log.Info("context cancelled") + // Wait for all spawned goroutines to finish. + for len(s.running) > 0 { + delete(s.running, <-doneChan) + } + close(allDoneChan) // Signal that we are done. + return + + // Operation received. + case op := <-opChan: + log.Info("operation received") + s.pending[op.chart] = &op + + // Operation done. + case chart := <-doneChan: + log.Info("operation done") + delete(s.running, chart) + } + + log.Info("status", "pending", len(s.pending), "running", len(s.running)) + + // Spawn pending operations if possible. 
+ // NOTE: From the go spec "The iteration order over maps is not specified + // and is not guaranteed to be the same from one iteration to the next." + // The fairness of the scheduling we do here will be determined by go's + // map iteration algorithm. This could be a problem. + // See: https://dev.to/wallyqs/gos-map-iteration-order-is-not-that-random-mag + for chart, op := range s.pending { + if _, opRunning := s.running[chart]; opRunning { + log.Info("skiping pending op (already running)", "chart", op.chart) + continue + } + if !s.enoughMemoryForOperation(op) { + log.Info("skiping pending op (not enough memory)", "chart", op.chart) + continue + } + + log.Info("spawn operation", "op", op.summary(), "chart", op.chart) + delete(s.pending, chart) + s.running[chart] = s.memoryForOperation(op) + go runOperation(ctx, op) + } + } +} + +func runOperation(ctx context.Context, op *operation) { + defer func() { doneChan <- op.chart }() + var err error + switch op.kind { + case opInstall: + err = runInstall(ctx, op) + if err != nil { + // NOTE: Once this is logged, the operation has been processed. It won't + // be retried. A programmer needs to look at this error and fix it. If + // it can be fixed by retrying something, do the retries within the + // operation, and don't surface the error to here. + log.Error(err, "helm error", "op", op.summary(), "chart", op.chart) + } + case opUninstall: + // NOTE: We're not interested on raising uninstall errors since it's most + // likely that the release was already uninstalled on a previous reconcile + runUninstall(ctx, op) + } +} + +func runInstall(ctx context.Context, op *operation) error { + // TODO: Don't run helm update on every install, it takes significant time + // and is not needed most times. Only run it if it hasn't run for a while. + if err := runRepoUpdate(ctx, op); err != nil { + return err + } + valuesFile, err := downloadValues(ctx, op) + if err != nil { + return err + } + defer os.Remove(valuesFile) + return runCommand(ctx, opts.InstallTimeout, + "helm", "-n", op.chart.Namespace, "upgrade", "-i", + op.chart.Release, op.chart.Name, + "--version", op.args.Version, + "-f", valuesFile, + ) +} + +func runUninstall(ctx context.Context, op *operation) error { + return runCommand(ctx, opts.UninstallTimeout, + "helm", "-n", op.chart.Namespace, "uninstall", + op.chart.Release, + ) +} + +func runRepoUpdate(ctx context.Context, op *operation) error { + if err := runCommand(ctx, opts.RepoUpdateTimeout, + "helm", "-n", op.chart.Namespace, "repo", "add", + op.args.RepoName, op.args.RepoURL, + ); err != nil { + return err + } + return runCommand(ctx, opts.RepoUpdateTimeout, + "helm", "-n", op.chart.Namespace, "repo", "update", + op.args.RepoName, + ) +} + +// NOTE: We need to depend on a kubernetes client to fetch the values. Having +// the full helm values as InstallArgs is not desirable, the request would have +// to be done to early, and those values would have to live in memory for longer +// than they have to. If we really wanted to cut the dependency on the client +// we could take a getValues closure in InstallArgs that makes the request to +// fetch the values when called. 
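For reference, downloadValues (defined just below) assumes the chart's values are stored in a ConfigMap whose Data carries a values.yaml key and whose name matches InstallArgs.ValuesName. A minimal sketch of producing such a ConfigMap with the same client-go types follows; the namespace, name, and values content are illustrative, not something this file creates.

// Sketch (not part of the operator): the ConfigMap shape downloadValues expects.
package helmvaluesexample

import (
	"context"
	"fmt"

	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func writeValuesConfigMap(ctx context.Context, kc client.Client) error {
	cm := core.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "demo",              // illustrative
			Name:      "my-release-values", // must match InstallArgs.ValuesName
		},
		Data: map[string]string{
			// downloadValues reads only this key and writes it to a temp file
			// passed to `helm upgrade -i ... -f <file>`.
			"values.yaml": "replicaCount: 1\n",
		},
	}
	if err := kc.Create(ctx, &cm); err != nil {
		return fmt.Errorf("create values configmap: %w", err)
	}
	return nil
}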
+func downloadValues(ctx context.Context, op *operation) (string, error) { + ns := op.chart.Namespace + name := op.args.ValuesName + + valuesFile := os.TempDir() + "/" + name + "-values.yaml" + + valuesObj := core.ConfigMap{} + err := kc.Get(ctx, client.ObjectKey{Namespace: ns, Name: name}, &valuesObj) + if err != nil { + return "", err + } + values, ok := valuesObj.Data["values.yaml"] + if !ok { + return "", fmt.Errorf("values configmap data must have values.yaml: %s", name) + } + + if err := os.WriteFile(valuesFile, []byte(values), 0644); err != nil { + return "", err + } + + return valuesFile, nil +} + +func runCommand(ctx context.Context, timeout time.Duration, name string, arg ...string) error { + log.Info("running command", "command", cmdline(name, arg)) + // cmdCtx is not a child of ctx because if it was the subprocess would be + // KILLed right away when cancelled and if this happens we cannot try to + // TERMinate it first. + cmdCtx, cmdCancel := context.WithCancel(context.Background()) + cmd := exec.CommandContext(cmdCtx, name, arg...) + defer cmdCancel() + // NOTE: bytes.Buffer will grow as needed to fit helm's stderr. We can tail + // the output to a circular buffer instead if we want to cap memory usage. + var stderr bytes.Buffer + cmd.Stderr = &stderr + // NOTE: If we also want stdout we can add: `cmd.Stdout = &stderr`. + if err := cmd.Start(); err != nil { + return err + } + go func() { + // Wait until the command finishes or ctx is cancelled. + select { + // The operation's context has been cancelled. Terminate the subprocess. + case <-ctx.Done(): + // We first try sending SIGTERM, to give helm a chance to clean up. + // Hopefully this way we avoid helm release secret corruption. + // see: https://github.com/helm/helm/pull/9180 + cmd.Process.Signal(syscall.SIGTERM) + time.Sleep(time.Second) + // Cancel cmdCtx to ensure the process exits. If still alive it will + // be sent SIGKILL. + cmdCancel() + case <-cmdCtx.Done(): + } + }() + if err := cmd.Wait(); err != nil { + return fmt.Errorf("runCommand(%s): %w: %s", cmdline(name, arg), err, stderr.String()) + } + return nil +} + +func cmdline(name string, arg []string) string { + return name + " " + strings.Join(arg, " ") +} diff --git a/src/core/operator/main.go b/src/core/operator/main.go new file mode 100644 index 00000000..d8ec21c0 --- /dev/null +++ b/src/core/operator/main.go @@ -0,0 +1,222 @@ +package main + +import ( + "flag" + "os" + "time" + + // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) + // to ensure that exec-entrypoint and run can make use of them. 
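As a quick aside, a hedged usage sketch of the helm package API defined above (Start, Install, Uninstall); the chart, repo URL, version, and values name below are invented for illustration, and the real callers are main.go and the controllers.

// Usage sketch only; not code from the operator.
package helmusageexample

import (
	"context"
	"time"

	"datacoves.com/operator/helm"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func drive(ctx context.Context, kc client.Client) {
	// Start the single runner goroutine; it owns scheduling and the memory budget.
	done := helm.Start(ctx, helm.RunnerOptions{
		MaxMemoryBytes:      500 * 1024 * 1024,
		MaxMemoryBytesPerOp: 50 * 1024 * 1024,
		RepoUpdateTimeout:   2 * time.Minute,
		InstallTimeout:      10 * time.Minute,
		UninstallTimeout:    5 * time.Minute,
	}, kc)

	chart := helm.Chart{Namespace: "demo", Name: "bitnami/nginx", Release: "web"}

	// Queue an install; operations on the same Chart are serialized by the runner.
	helm.Install(chart, helm.InstallArgs{
		RepoURL:    "https://charts.bitnami.com/bitnami",
		RepoName:   "bitnami",
		Version:    "13.2.0",
		ValuesName: "web-values", // ConfigMap holding values.yaml, see downloadValues
	})

	// Later, request removal of the same release.
	helm.Uninstall(chart)

	// Once ctx is cancelled, wait for in-flight helm subprocesses to finish.
	<-done
}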
+ _ "k8s.io/client-go/plugin/pkg/client/auth" + + "github.com/TheZeroSlave/zapsentry" + "github.com/getsentry/sentry-go" + "github.com/go-logr/zapr" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/healthz" + ctrlzap "sigs.k8s.io/controller-runtime/pkg/log/zap" + + datacovescomv1 "datacoves.com/operator/api/v1" + "datacoves.com/operator/controllers" + "datacoves.com/operator/helm" + //+kubebuilder:scaffold:imports +) + +var ( + scheme = runtime.NewScheme() + setupLog = ctrl.Log.WithName("setup") +) + +func init() { + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + + utilruntime.Must(datacovescomv1.AddToScheme(scheme)) + //+kubebuilder:scaffold:scheme +} + +func main() { + var metricsAddr string + var enableLeaderElection bool + var probeAddr string + flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") + flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.BoolVar(&enableLeaderElection, "leader-elect", false, + "Enable leader election for controller manager. "+ + "Enabling this will ensure there is only one active controller manager.") + opts := ctrlzap.Options{ + Development: true, + TimeEncoder: zapcore.ISO8601TimeEncoder, + } + opts.BindFlags(flag.CommandLine) + flag.Parse() + + // Logging setup. A bit of a mess. The operator uses logr.Logger, which is + // what kubebuilder and controller-runtime come with. Logr is an interface + // package. The backend used is zap, through the zapr package. The package + // controller-runtime/pkg/log/zap wraps zapr to set some default options and + // provides utilities to configure the zap logger from command line flags. + // To integrate sentry with the zap/logr loggers we use the zapsentry package. + // It takes a *zap.Logger to attach to, which we get from ctrlzap.NewRaw. + sentryDsn, ok := os.LookupEnv("SENTRY_DSN") + var sentryClient *sentry.Client + var err error + if ok { + // TODO: Sentry defaults to HTTP. Use HTTPS. + sentryClient, err = sentry.NewClient(sentry.ClientOptions{ + Dsn: sentryDsn, + Release: os.Getenv("SENTRY_RELEASE"), + Environment: os.Getenv("SENTRY_ENVIRONMENT"), + AttachStacktrace: true, + }) + // NOTE: This waits for a second before exiting for sentry to flush. + // Are there any unintended consequences from doing this? + defer sentryClient.Flush(time.Second) + } + if ok && err == nil { + zapLogger := ctrlzap.NewRaw(ctrlzap.UseFlagOptions(&opts)) + zapLogger = modifyToSentryLogger(zapLogger, sentryClient) + ctrl.SetLogger(zapr.NewLogger(zapLogger)) + setupLog.Info("sentry configured") + } else { + ctrl.SetLogger(ctrlzap.New(ctrlzap.UseFlagOptions(&opts))) + if !ok { + setupLog.Info("SENTRY_DSN not set") + } else { + setupLog.Error(err, "unable to create sentry client") + } + } + + leaseDuration := 30 * time.Second // Default is 15 seconds. + renewDeadline := 20 * time.Second // Default is 10 seconds. 
+ mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: scheme, + MetricsBindAddress: metricsAddr, + Port: 9443, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "5c393c71.datacoves.com", + LeaseDuration: &leaseDuration, + RenewDeadline: &renewDeadline, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + os.Exit(1) + } + + if err = (&controllers.WorkspaceReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "Workspace") + os.Exit(1) + } + if err = (&controllers.UserReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "User") + os.Exit(1) + } + if err = (&controllers.HelmReleaseReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "HelmRelease") + os.Exit(1) + } + // if err = (&controllers.AccountReconciler{ + // Client: mgr.GetClient(), + // Scheme: mgr.GetScheme(), + // }).SetupWithManager(mgr); err != nil { + // setupLog.Error(err, "unable to create controller", "controller", "Account") + // os.Exit(1) + // } + //+kubebuilder:scaffold:builder + + if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up health check") + os.Exit(1) + } + if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil { + setupLog.Error(err, "unable to set up ready check") + os.Exit(1) + } + + // NOTE: On helm subprocess termination: + // - https://go.dev/ref/spec#Program_execution + // Go programs exit when the main goroutine exits. They don't wait for other + // running goroutines to finish. We need to do it. + // - https://go.dev/blog/context + // - https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination + // Kubernetes sends TERM, waits for a gracePeriod (manager.yaml), then KILLs. + // + // When the operator gets TERM, the ctx below is cancelled. The helm package + // stops taking install/uninstall request at this point, and starts waiting + // for all subprocesses to finish. If they don't finish within the grace + // period the operator will be KILLed. The subprocesses are most likely + // orphaned and killed. Killing helm can leave it's release kubernetes + // secret in a bad state (PendingUpgrade). Some things we can do: + // + // 1. Increase the grace period significantly to let subprocesses finish. + // Unlikely that we can/want to increase it enough to ensure any helm + // subprocess completes normally. + // 2. Forward the TERM signal to the subprocesses. This probably lets helm + // shut down more gracefully (https://github.com/helm/helm/pull/9180). + // 3. As a last resort, when helm was KILLed and the release is in a bad + // state, try to recover by deleting the release secret, a la nuke_helm_release. + + // This starts a goroutine that cancels ctx is on TERM, and os.Exits on receiving + // a second termination signal. + ctx := ctrl.SetupSignalHandler() + + // We use the mgr kubernetes client, which does caching, etc. This couples + // the helm package and the manager. If this leads to problems or difficulty + // in undertanding how things are working we may be better off by having the + // helm package setup its own non-caching kubernetes client. 
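If the coupling to the manager's cached client ever becomes a problem, a small sketch of the alternative the note above mentions, a standalone non-caching client built from the manager's rest config (the package name and function are illustrative):

// Sketch of the alternative mentioned above; not used by the operator as written.
package helmclientexample

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func newNonCachingClient(mgr ctrl.Manager) (client.Client, error) {
	// Unlike mgr.GetClient(), client.New talks to the API server directly and
	// does not go through the manager's cache, decoupling the helm package's
	// reads from the manager's lifecycle.
	return client.New(mgr.GetConfig(), client.Options{Scheme: mgr.GetScheme()})
}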
+ setupLog.Info("starting helm") + helmOptions := helm.RunnerOptions{ + MaxMemoryBytes: 1000 * 1024 * 1024, + MaxMemoryBytesPerOp: 50 * 1024 * 1024, + RepoUpdateTimeout: 2 * time.Minute, + InstallTimeout: 20 * time.Minute, + UninstallTimeout: 10 * time.Minute, + } + helmDone := helm.Start(ctx, helmOptions, mgr.GetClient()) + + setupLog.Info("starting manager") + err = mgr.Start(ctx) + if err != nil { + setupLog.Error(err, "problem running manager") + } + + // The ctx has been cancelled. Wait for the helm runner goroutine to finish. + <-helmDone + + if err != nil { + os.Exit(1) + } +} + +func modifyToSentryLogger(log *zap.Logger, client *sentry.Client) *zap.Logger { + cfg := zapsentry.Configuration{ + Level: zapcore.ErrorLevel, + EnableBreadcrumbs: false, // TODO: Enable if we use them. + } + core, err := zapsentry.NewCore(cfg, zapsentry.NewSentryClientFromClient(client)) + + // In case of err it will return noop core. So, we can safely attach it. + if err != nil { + log.Warn("failed to init zap", zap.Error(err)) + } + + log = zapsentry.AttachCoreToLogger(core, log) + + // To use breadcrumbs feature - create new scope explicitly + // and attach after attaching the core. + return log.With(zapsentry.NewScope()) +} diff --git a/src/core/static-pages/Dockerfile b/src/core/static-pages/Dockerfile new file mode 100644 index 00000000..372bcee7 --- /dev/null +++ b/src/core/static-pages/Dockerfile @@ -0,0 +1,13 @@ +FROM nginx:1.12-alpine +LABEL com.datacoves.from=nginx:1.12-alpine +LABEL com.datacoves.version.observe.nginx=1.12-alpine + +COPY default.conf /etc/nginx/conf.d/default.conf + +WORKDIR /usr/share/nginx/html/ +COPY src . +COPY templates /tmp/templates + +EXPOSE 80 + +CMD ["/bin/sh", "-c", "envsubst < /tmp/templates/index.template.html > /usr/share/nginx/html/index.html; nginx -g 'daemon off;'"] diff --git a/src/core/static-pages/default.conf b/src/core/static-pages/default.conf new file mode 100644 index 00000000..ad0ae8d8 --- /dev/null +++ b/src/core/static-pages/default.conf @@ -0,0 +1,35 @@ +server { + listen 80; + server_name _; + charset utf-8; + add_header Cache-Control no-cache; + + root /usr/share/nginx/html/; + + error_page 403 /index.html; + error_page 404 /index.html; + + location / { + try_files $uri $uri/ /index.html; + root /usr/share/nginx/html/; + allow all; + } + + location = /favicon.ico { + root /usr/share/nginx/html/assets; + } + + location /static { + alias /usr/share/nginx/html/assets; + } + + location /assets { + alias /usr/share/nginx/html/assets; + } + + location = /service/down/ { + try_files /service-down.html =404; + root /usr/share/nginx/html/; + } + +} \ No newline at end of file diff --git a/src/core/static-pages/src/assets/favicon.ico b/src/core/static-pages/src/assets/favicon.ico new file mode 100644 index 00000000..12ff98b8 Binary files /dev/null and b/src/core/static-pages/src/assets/favicon.ico differ diff --git a/src/core/static-pages/src/assets/logo.svg b/src/core/static-pages/src/assets/logo.svg new file mode 100644 index 00000000..e7fd17d4 --- /dev/null +++ b/src/core/static-pages/src/assets/logo.svg @@ -0,0 +1,4 @@ + + + + diff --git a/src/core/static-pages/src/service-down.html b/src/core/static-pages/src/service-down.html new file mode 100644 index 00000000..118318dc --- /dev/null +++ b/src/core/static-pages/src/service-down.html @@ -0,0 +1,132 @@ + + + + Datacoves on Maintenance + + + + + + + +
+ +
+

Something isn’t done loading.

+

Try reloading your workspace.

+

If this continues, please contact support.

+ + +
+
+ + + diff --git a/src/core/static-pages/templates/index.template.html b/src/core/static-pages/templates/index.template.html new file mode 100644 index 00000000..fac859b2 --- /dev/null +++ b/src/core/static-pages/templates/index.template.html @@ -0,0 +1,37 @@ + + + + Datacoves on Maintenance + + + + + + + + +

⚠️ SERVICE DOWN ⚠️

+

Datacoves is currently down for scheduled maintenance

+
+

+ The service is planned to be restored by 🕒 + ${RESTORE_TIME}. +

+

+ Please contact + ✉️ ${CONTACT_NAME} + if you have any questions. +

+
+

Thanks, the Datacoves Team.

+ + \ No newline at end of file diff --git a/src/core/workbench/.dockerignore b/src/core/workbench/.dockerignore new file mode 100644 index 00000000..cf709889 --- /dev/null +++ b/src/core/workbench/.dockerignore @@ -0,0 +1 @@ +**/node_modules diff --git a/src/core/workbench/Dockerfile b/src/core/workbench/Dockerfile new file mode 100644 index 00000000..78ef0267 --- /dev/null +++ b/src/core/workbench/Dockerfile @@ -0,0 +1,40 @@ +FROM node:18.12.1-alpine AS local +LABEL com.datacoves.from=nginx:1.12-alpine +LABEL com.datacoves.version.core-workbench='0.4.0' +LABEL com.datacoves.version.core-workbench.node='16.8.0-alpine' +LABEL com.datacoves.version.core-workbench.nginx='1.12-alpine' + +RUN apk add --no-cache vim + +WORKDIR /usr/src +COPY app/package.json . +COPY app/yarn.lock . + +RUN yarn install # backup of node_modules for later reuse + +ENV PATH=/usr/src/app/node_modules/.bin:$PATH + +WORKDIR /usr/src/app + +CMD sh -c "cp -rf ../node_modules . && tail -f /dev/null" + +# build deps +FROM local AS build-deps + +COPY app /usr/src/app + +WORKDIR /usr/src/app + +RUN mv ../node_modules . && yarn build + +# Nginx +FROM nginx:1.12-alpine AS production + +COPY --from=build-deps /usr/src/app/build /usr/share/nginx/workbench + +COPY default.conf /etc/nginx/conf.d/default.conf + +EXPOSE 80 + +CMD ["nginx", "-g", "daemon off;"] + diff --git a/src/core/workbench/app/.env.example b/src/core/workbench/app/.env.example new file mode 100644 index 00000000..0144273e --- /dev/null +++ b/src/core/workbench/app/.env.example @@ -0,0 +1,4 @@ +REACT_APP_API_URL=http://api.bulletproofapp.com +REACT_APP_API_MOCKING=true +TSC_COMPILE_ON_ERROR=true +ESLINT_NO_DEV_ERRORS=true \ No newline at end of file diff --git a/src/core/workbench/app/.eslintrc.js b/src/core/workbench/app/.eslintrc.js new file mode 100644 index 00000000..f802a175 --- /dev/null +++ b/src/core/workbench/app/.eslintrc.js @@ -0,0 +1,75 @@ +module.exports = { + root: true, + env: { + node: true, + es6: true, + }, + parserOptions: { ecmaVersion: 8, sourceType: 'module' }, + ignorePatterns: ['node_modules/*'], + extends: ['eslint:recommended'], + overrides: [ + { + files: ['**/*.ts', '**/*.tsx'], + parser: '@typescript-eslint/parser', + settings: { + react: { version: 'detect' }, + 'import/resolver': { + typescript: {}, + }, + }, + env: { + browser: true, + node: true, + es6: true, + }, + extends: [ + 'eslint:recommended', + 'plugin:import/errors', + 'plugin:import/warnings', + 'plugin:import/typescript', + 'plugin:@typescript-eslint/recommended', + 'plugin:react/recommended', + 'plugin:react-hooks/recommended', + 'plugin:jsx-a11y/recommended', + 'plugin:prettier/recommended', + 'plugin:testing-library/react', + 'plugin:jest-dom/recommended', + ], + rules: { + 'no-restricted-imports': [ + 'error', + { + patterns: ['@/features/*/*'], + }, + ], + 'linebreak-style': ['error', 'unix'], + 'react/prop-types': 'off', + + 'import/order': [ + 'error', + { + groups: ['builtin', 'external', 'internal', 'parent', 'sibling', 'index', 'object'], + 'newlines-between': 'always', + alphabetize: { order: 'asc', caseInsensitive: true }, + }, + ], + 'import/default': 'off', + 'import/no-named-as-default-member': 'off', + 'import/no-named-as-default': 'off', + + 'react/react-in-jsx-scope': 'off', + + 'jsx-a11y/anchor-is-valid': 'off', + + '@typescript-eslint/no-unused-vars': ['error'], + + '@typescript-eslint/explicit-function-return-type': ['off'], + '@typescript-eslint/explicit-module-boundary-types': ['off'], + '@typescript-eslint/no-empty-function': ['off'], + 
'@typescript-eslint/no-explicit-any': ['off'], + + 'prettier/prettier': ['error', {}, { usePrettierrc: true }], + }, + }, + ], +}; diff --git a/src/core/workbench/app/.gitignore b/src/core/workbench/app/.gitignore new file mode 100644 index 00000000..41ae8e3b --- /dev/null +++ b/src/core/workbench/app/.gitignore @@ -0,0 +1,28 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules +/.pnp +.pnp.js + +# testing +/coverage +/cypress/videos +/cypress/screenshots + +# production +/build + +# misc +.DS_Store +.env +.env.local +.env.development.local +.env.test.local +.env.production.local + +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +*.core \ No newline at end of file diff --git a/src/core/workbench/app/.husky/.gitignore b/src/core/workbench/app/.husky/.gitignore new file mode 100644 index 00000000..31354ec1 --- /dev/null +++ b/src/core/workbench/app/.husky/.gitignore @@ -0,0 +1 @@ +_ diff --git a/src/core/workbench/app/.husky/pre-commit b/src/core/workbench/app/.husky/pre-commit new file mode 100755 index 00000000..4c856eef --- /dev/null +++ b/src/core/workbench/app/.husky/pre-commit @@ -0,0 +1,4 @@ +#!/bin/sh +. "$(dirname "$0")/_/husky.sh" + +yarn validate diff --git a/src/core/workbench/app/.prettierrc b/src/core/workbench/app/.prettierrc new file mode 100644 index 00000000..e1c6a260 --- /dev/null +++ b/src/core/workbench/app/.prettierrc @@ -0,0 +1,7 @@ +{ + "singleQuote": true, + "trailingComma": "es5", + "printWidth": 100, + "tabWidth": 2, + "useTabs": false +} \ No newline at end of file diff --git a/src/core/workbench/app/.storybook/main.js b/src/core/workbench/app/.storybook/main.js new file mode 100644 index 00000000..fcc0e22c --- /dev/null +++ b/src/core/workbench/app/.storybook/main.js @@ -0,0 +1,41 @@ +const path = require('path'); +const TsconfigPathsPlugin = require('tsconfig-paths-webpack-plugin'); + +module.exports = { + stories: ['../src/**/*.stories.mdx', '../src/**/*.stories.@(js|jsx|ts|tsx)'], + addons: [ + '@storybook/addon-links', + '@storybook/addon-essentials', + '@storybook/preset-create-react-app', + ], + webpackFinal: async (config) => { + config.module.rules.push({ + test: /\.css$/, + use: [ + { + loader: 'postcss-loader', + options: { + ident: 'postcss', + plugins: [require('autoprefixer')], + }, + }, + ], + include: path.resolve(__dirname, '../'), + }); + config.resolve.plugins = config.resolve.plugins || []; + config.resolve.plugins.push( + new TsconfigPathsPlugin({ + configFile: path.resolve(__dirname, '../tsconfig.json'), + }) + ); + return { + ...config, + plugins: config.plugins.filter((plugin) => { + if (plugin.constructor.name === 'ESLintWebpackPlugin') { + return false; + } + return true; + }), + }; + }, +}; diff --git a/src/core/workbench/app/.storybook/preview.js b/src/core/workbench/app/.storybook/preview.js new file mode 100644 index 00000000..9913c47a --- /dev/null +++ b/src/core/workbench/app/.storybook/preview.js @@ -0,0 +1,15 @@ +import React from 'react'; +import { AppProvider } from '../src/context'; +import '../src/index.css'; + +export const parameters = { + actions: { argTypesRegex: '^on[A-Z].*' }, +}; + +export const decorators = [ + (Story) => ( + + + + ), +]; diff --git a/src/core/workbench/app/.vscode/extensions.json b/src/core/workbench/app/.vscode/extensions.json new file mode 100644 index 00000000..96313570 --- /dev/null +++ b/src/core/workbench/app/.vscode/extensions.json @@ -0,0 +1,8 @@ +{ + "recommendations": [ + "dbaeumer.vscode-eslint", + 
"esbenp.prettier-vscode", + "dsznajder.es7-react-js-snippets", + "mariusalchimavicius.json-to-ts", + ] +} diff --git a/src/core/workbench/app/.vscode/launch.json b/src/core/workbench/app/.vscode/launch.json new file mode 100644 index 00000000..b23d5875 --- /dev/null +++ b/src/core/workbench/app/.vscode/launch.json @@ -0,0 +1,15 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. + // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "pwa-chrome", + "request": "launch", + "name": "Launch Chrome against localhost", + "url": "http://localhost:8080", + "webRoot": "${workspaceFolder}" + } + ] +} \ No newline at end of file diff --git a/src/core/workbench/app/.vscode/settings.json b/src/core/workbench/app/.vscode/settings.json new file mode 100644 index 00000000..124dc6d4 --- /dev/null +++ b/src/core/workbench/app/.vscode/settings.json @@ -0,0 +1,6 @@ +{ + "editor.formatOnSave": true, + "editor.codeActionsOnSave": { + "source.fixAll.eslint": true + } +} diff --git a/src/core/workbench/app/LICENCE b/src/core/workbench/app/LICENCE new file mode 100644 index 00000000..d36980f7 --- /dev/null +++ b/src/core/workbench/app/LICENCE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2021 Alan Alickovic + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/src/core/workbench/app/README.md b/src/core/workbench/app/README.md new file mode 100644 index 00000000..c2ae3736 --- /dev/null +++ b/src/core/workbench/app/README.md @@ -0,0 +1,49 @@ +# Bulletproof React 🛡️ ⚛️ + +[![MIT License](https://img.shields.io/apm/l/atomic-design-ui.svg?)](https://github.com/tterb/atomic-design-ui/blob/master/LICENSEs) + +A simple, scalable, and powerful architecture for building production ready React applications. + +## Introduction + +React is a great tool for building frontend applications. It has a very diverse ecosystem with hundreds of great libraries for literally anything you might need. However, it can be overwhelming to be forced to make so many choices. +It is also very flexible, you can write React applications in any way you like but that flexibility comes with a cost. Since there is no pre-defined architecture developers can follow, it often leads to messy, inconsistent, or over-complicated codebases. 
+ +This is an attempt to present the way of creating React applications using the best tools in the ecosystem with a good project structure that scales very well. It is based on the experience of working with many different codebases, and this architecture turns out to be the most effective one. + +The goal of this repo is to serve as a collection of good practices when developing React applications. It is supposed to showcase solving most of the real-world problems of an application in a practical way and help developers writing better applications. + +Feel free to explore the codebase to get the most value out of the repo. + +#### Disclaimer: + +This is not supposed to be a template or a framework. It is an opinionated guide that shows how to do some things in a certain way. You are not forced to do everything exactly as it is shown here, decide what works best for you and your team and be consistent with your style. + +## Table Of Contents: + +- [The Application Overview](docs/application-overview.md) +- [Project Configuration](docs/project-configuration.md) +- [Project Structure](docs/project-structure.md) +- [Components And Styling](docs/components-and-styling.md) +- [Forms](docs/forms.md) +- [API Layer](docs/api-layer.md) +- [State Management](docs/state-management.md) +- [Auth](docs/auth.md) +- [API Mock Server](docs/api-mock-server.md) +- [Testing](docs/testing.md) +- [Error Handling](docs/error-handling.md) +- [Performance](docs/performance.md) + +## Contributing + +Contributions are always welcome! If you have any ideas, suggestions, fixes, feel free to contribute. You can do that by going through the following steps: + +1. Clone this repo +2. Create a branch: `git checkout -b your-feature` +3. Make some changes +4. Test your changes +5. Push your branch and open a Pull Request + +## License + +[MIT](https://choosealicense.com/licenses/mit/) diff --git a/src/core/workbench/app/craco.config.js b/src/core/workbench/app/craco.config.js new file mode 100644 index 00000000..7c82c4a9 --- /dev/null +++ b/src/core/workbench/app/craco.config.js @@ -0,0 +1,23 @@ +const path = require('path'); + +module.exports = { + webpack: { + alias: { + '@': path.resolve(__dirname, 'src'), + }, + }, + style: { + postcss: { + plugins: [require('autoprefixer')], + }, + }, + jest: { + configure: (jestConfig, { env, paths, resolve, rootDir }) => { + return { + ...jestConfig, + setupFiles: [...jestConfig.setupFiles, './src/config/__mocks__/dom.js'], + moduleFileExtensions: [...jestConfig.moduleFileExtensions, 'ts', 'tsx'] + }; + } + }, +}; diff --git a/src/core/workbench/app/cypress.json b/src/core/workbench/app/cypress.json new file mode 100644 index 00000000..0967ef42 --- /dev/null +++ b/src/core/workbench/app/cypress.json @@ -0,0 +1 @@ +{} diff --git a/src/core/workbench/app/cypress/.eslintrc.js b/src/core/workbench/app/cypress/.eslintrc.js new file mode 100644 index 00000000..a7c5984a --- /dev/null +++ b/src/core/workbench/app/cypress/.eslintrc.js @@ -0,0 +1,6 @@ +module.exports = { + root: true, + plugins: ['eslint-plugin-cypress'], + parser: '@typescript-eslint/parser', + env: { 'cypress/globals': true }, +}; diff --git a/src/core/workbench/app/cypress/fixtures/example.json b/src/core/workbench/app/cypress/fixtures/example.json new file mode 100644 index 00000000..02e42543 --- /dev/null +++ b/src/core/workbench/app/cypress/fixtures/example.json @@ -0,0 +1,5 @@ +{ + "name": "Using fixtures to represent data", + "email": "hello@cypress.io", + "body": "Fixtures are a great way to mock data for responses 
to routes" +} diff --git a/src/core/workbench/app/cypress/global.d.ts b/src/core/workbench/app/cypress/global.d.ts new file mode 100644 index 00000000..e99cbb7c --- /dev/null +++ b/src/core/workbench/app/cypress/global.d.ts @@ -0,0 +1,10 @@ +// add new command to the existing Cypress interface +declare global { + namespace Cypress { + interface Chainable { + checkAndDismissNotification: (matcher: RegExp | string) => void; + } + } +} + +export {}; diff --git a/src/core/workbench/app/cypress/integration/smoke.ts b/src/core/workbench/app/cypress/integration/smoke.ts new file mode 100644 index 00000000..9de396fb --- /dev/null +++ b/src/core/workbench/app/cypress/integration/smoke.ts @@ -0,0 +1,220 @@ +import { + userGenerator, + discussionGenerator, + commentGenerator, +} from '../../src/test/data-generators'; + +import { formatDate } from '../../src/utils/format'; + +describe('smoke', () => { + it('should handle normal app flow', () => { + const user = userGenerator(); + + const discussion = discussionGenerator(); + + // registration: + cy.visit('http://localhost:3000/auth/register'); + + cy.findByRole('textbox', { + name: /first name/i, + }).type(user.firstName); + cy.findByRole('textbox', { + name: /last name/i, + }).type(user.lastName); + cy.findByRole('textbox', { + name: /email address/i, + }).type(user.email); + cy.findByLabelText(/password/i).type(user.password); + + cy.findByRole('textbox', { + name: /team name/i, + }).type(user.teamName); + + cy.findByRole('button', { + name: /register/i, + }).click(); + + cy.findByRole('heading', { + name: `Welcome ${user.firstName} ${user.lastName}`, + }).should('exist'); + + // log out: + cy.findByRole('button', { + name: /open user menu/i, + }).click(); + + cy.findByRole('menuitem', { + name: /sign out/i, + }).click(); + + // log in: + cy.visit('http://localhost:3000/auth/login'); + + cy.findByRole('textbox', { + name: /email address/i, + }).type(user.email); + cy.findByLabelText(/password/i).type(user.password); + + cy.findByRole('button', { + name: /log in/i, + }).click(); + + cy.findByRole('heading', { + name: `Welcome ${user.firstName} ${user.lastName}`, + }).should('exist'); + + cy.findByRole('link', { + name: /discussions/i, + }).click(); + + // create discussion: + cy.findByRole('button', { + name: /create discussion/i, + }).click(); + + cy.findByRole('dialog').within(() => { + cy.findByRole('textbox', { + name: /title/i, + }).type(discussion.title); + cy.findByRole('textbox', { + name: /body/i, + }).type(discussion.body); + cy.findByRole('button', { + name: /submit/i, + }).click(); + }); + + cy.checkAndDismissNotification(/discussion created/i); + + cy.findByRole('dialog').should('not.exist'); + + cy.wait(200); + + // visit discussion page: + cy.findByRole('row', { + name: `${discussion.title} ${formatDate(discussion.createdAt)} View Delete`, + }).within(() => { + cy.findByRole('link', { + name: /view/i, + }).click(); + }); + + cy.findByRole('heading', { + name: discussion.title, + }).should('exist'); + + // update discussion: + cy.findByRole('button', { + name: /update discussion/i, + }).click(); + + const updatedDiscussion = discussionGenerator(); + + cy.findByRole('dialog').within(() => { + cy.findByRole('textbox', { + name: /title/i, + }) + .clear() + .type(updatedDiscussion.title); + cy.findByRole('textbox', { + name: /body/i, + }) + .clear() + .type(updatedDiscussion.body); + cy.findByRole('button', { + name: /submit/i, + }).click(); + }); + + cy.checkAndDismissNotification(/discussion updated/i); + + cy.findByRole('heading', { + 
name: updatedDiscussion.title, + }).should('exist'); + + // create comment: + const comment = commentGenerator(); + + cy.findByRole('button', { + name: /create comment/i, + }).click(); + + cy.findByRole('dialog').within(() => { + cy.findByRole('textbox', { + name: /body/i, + }).type(comment.body, { force: true }); // for some reason it requires force to be set to true + + cy.findByRole('button', { + name: /submit/i, + }).click(); + }); + + cy.checkAndDismissNotification(/comment created/i); + + cy.findByRole('list', { + name: 'comments', + }).within(() => { + cy.findByText(comment.body).should('exist'); + }); + + cy.wait(200); + + // delete comment: + cy.findByRole('list', { + name: 'comments', + }).within(() => { + cy.findByRole('listitem', { + name: `comment-${comment.body}-0`, + }).within(() => { + cy.findByRole('button', { + name: /delete comment/i, + }).click(); + }); + }); + + cy.findByRole('dialog').within(() => { + cy.findByRole('button', { + name: /delete/i, + }).click(); + }); + + cy.wait(200); + + cy.checkAndDismissNotification(/comment deleted/i); + + cy.findByRole('list', { + name: 'comments', + }).within(() => { + cy.findByText(comment.body).should('not.exist'); + }); + + // go back to discussions list: + cy.findByRole('link', { + name: /discussions/i, + }).click(); + + cy.wait(200); + + // delete discussion: + cy.findByRole('row', { + name: `${updatedDiscussion.title} ${formatDate(discussion.createdAt)} View Delete`, + }).within(() => { + cy.findByRole('button', { + name: 'Delete', + }).click(); + }); + + cy.findByRole('dialog').within(() => { + cy.findByRole('button', { + name: /delete/i, + }).click(); + }); + + cy.checkAndDismissNotification(/discussion deleted/i); + + cy.wait(200); + cy.findByRole('row', { + name: `${updatedDiscussion.title} ${formatDate(discussion.createdAt)} View Delete`, + }).should('not.exist'); + }); +}); diff --git a/src/core/workbench/app/cypress/plugins/index.ts b/src/core/workbench/app/cypress/plugins/index.ts new file mode 100644 index 00000000..8229063a --- /dev/null +++ b/src/core/workbench/app/cypress/plugins/index.ts @@ -0,0 +1,22 @@ +/// +// *********************************************************** +// This example plugins/index.js can be used to load plugins +// +// You can change the location of this file or turn off loading +// the plugins file with the 'pluginsFile' configuration option. +// +// You can read more here: +// https://on.cypress.io/plugins-guide +// *********************************************************** + +// This function is called when a project is opened or re-opened (e.g. due to +// the project's config changing) + +/** + * @type {Cypress.PluginConfig} + */ +// eslint-disable-next-line no-unused-vars +module.exports = (on, config) => { + // `on` is used to hook into various events Cypress emits + // `config` is the resolved Cypress config +}; diff --git a/src/core/workbench/app/cypress/support/commands.ts b/src/core/workbench/app/cypress/support/commands.ts new file mode 100644 index 00000000..8bb17164 --- /dev/null +++ b/src/core/workbench/app/cypress/support/commands.ts @@ -0,0 +1,35 @@ +// *********************************************** +// This example commands.js shows you how to +// create various custom commands and overwrite +// existing commands. 
+// +// For more comprehensive examples of custom +// commands please read more here: +// https://on.cypress.io/custom-commands +// *********************************************** +// +// +// -- This is a parent command -- +// Cypress.Commands.add('login', (email, password) => { ... }) +// +// +// -- This is a child command -- +// Cypress.Commands.add('drag', { prevSubject: 'element'}, (subject, options) => { ... }) +// +// +// -- This is a dual command -- +// Cypress.Commands.add('dismiss', { prevSubject: 'optional'}, (subject, options) => { ... }) +// +// +// -- This will overwrite an existing command -- +// Cypress.Commands.overwrite('visit', (originalFn, url, options) => { ... }) +import '@testing-library/cypress/add-commands'; + +Cypress.Commands.add('checkAndDismissNotification', (matcher) => { + cy.findByRole('alert', { + name: matcher, + }).within(() => { + cy.findByText(matcher).should('exist'); + cy.findByRole('button').click(); + }); +}); diff --git a/src/core/workbench/app/cypress/support/index.ts b/src/core/workbench/app/cypress/support/index.ts new file mode 100644 index 00000000..37a498fb --- /dev/null +++ b/src/core/workbench/app/cypress/support/index.ts @@ -0,0 +1,20 @@ +// *********************************************************** +// This example support/index.js is processed and +// loaded automatically before your test files. +// +// This is a great place to put global configuration and +// behavior that modifies Cypress. +// +// You can change the location of this file or turn off +// automatically serving support files with the +// 'supportFile' configuration option. +// +// You can read more here: +// https://on.cypress.io/configuration +// *********************************************************** + +// Import commands.js using ES2015 syntax: +import './commands'; + +// Alternatively you can use CommonJS syntax: +// require('./commands') diff --git a/src/core/workbench/app/cypress/tsconfig.json b/src/core/workbench/app/cypress/tsconfig.json new file mode 100644 index 00000000..71d9a9fb --- /dev/null +++ b/src/core/workbench/app/cypress/tsconfig.json @@ -0,0 +1,9 @@ +{ + "compilerOptions": { + "esModuleInterop": true, + "target": "es5", + "lib": ["es5", "dom"], + "types": ["node", "cypress", "@testing-library/cypress"] + }, + "include": ["**/*.ts"] +} diff --git a/src/core/workbench/app/docs/api-layer.md b/src/core/workbench/app/docs/api-layer.md new file mode 100644 index 00000000..5c3c84bd --- /dev/null +++ b/src/core/workbench/app/docs/api-layer.md @@ -0,0 +1,13 @@ +# API Layer + +### Use a single instance of the API client + +No matter if your application is consuming RESTful or GraphQL API, have a single instance of the API client that's been pre-configured and reused throughout the application. E.g have a single API client ([axios](https://github.com/axios/axios) / [graphql-request](https://github.com/prisma-labs/graphql-request) / [apollo-client](https://www.apollographql.com/docs/react/)) instance with pre-defined configuration. + +[API Client Example Code](../src/lib/axios.ts) + +### Define and export request declarations + +Instead of declaring API requests on the go, have them defined and exported separately. If it's a restful API a declaration would be a fetcher function that calls an endpoint. 
On the other hand, requests for GraphQL APIs are declared via queries and mutations that could be consumed by data fetching libraries such as [react-query](https://react-query.tanstack.com/), [apollo-client](https://www.apollographql.com/docs/react/), [urql](https://formidable.com/open-source/urql/), etc. This makes it easier to track which endpoints are defined and available in the application You can also type the responses and infer it further for a good type safety of the data. + +[API Request Declaration Example Code](../src/features/discussions/api/index.ts) diff --git a/src/core/workbench/app/docs/api-mock-server.md b/src/core/workbench/app/docs/api-mock-server.md new file mode 100644 index 00000000..fcda5906 --- /dev/null +++ b/src/core/workbench/app/docs/api-mock-server.md @@ -0,0 +1,11 @@ +# API Mock Server + +For prototyping the API use [mswjs](https://mswjs.io/), which is a great tool for quickly creating frontends without worrying about servers. It is not an actual backend, but a mocked server inside a service worker that intercepts all HTTP requests and returns desired responses based on the handlers you define. This is especially useful if you only have access to the frontend and are blocked by some not implemented features on the backend. This way, you will not be forced to wait for the feature to be completed or hardcode response data in the code, but use actual HTTP calls to build frontend features. + +It can be used for designing API endpoints. The business logic of the mocked API can be created in its handlers. + +[API Handlers Example Code](../src/test/server/handlers/auth.ts) + +[Data Models Example Code](../src/test/server/db.ts) + +Having fully functional mocked API server also handy when it comes to testing, you don't have to mock fetch, but make requests to the mocked server instead with the data your application would expect. diff --git a/src/core/workbench/app/docs/application-overview.md b/src/core/workbench/app/docs/application-overview.md new file mode 100644 index 00000000..e1521cd8 --- /dev/null +++ b/src/core/workbench/app/docs/application-overview.md @@ -0,0 +1,52 @@ +# The Application Overview + +The application is pretty simple. Users can create teams where other users can join, and they start discussions on different topics between each other. + +A team is created during the registration if the user didn't choose to join an existing team and the user becomes the admin of it. + +## Data model + +The application contains the following models: + +- User - can have one of these roles: + + - `ADMIN` can: + - create/edit/delete discussions + - create/delete own comments + - delete users + - edit own profile + - `USER` - can: + - edit own profile + - create/delete own comments + +- Team: represents a team that has 1 admin and many users that can participate in discussions between each other. + +- Discussion: represents discussions created by team members. + +- Comment: represents all the messages in a discussion. + +## Get Started + +To set up the app execute the following commands. + +```bash +git clone https://github.com/alan2207/bulletproof-react.git +cd bulletproof-react +cp .env.example .env +yarn install +``` + +##### `yarn start` + +Runs the app in the development mode.\ +Open [http://localhost:3000](http://localhost:3000) to view it in the browser. + +##### `yarn build` + +Builds the app for production to the `build` folder.\ +It correctly bundles React in production mode and optimizes the build for the best performance. 
+ +The build is minified and the filenames include the hashes.\ +Your app is ready to be deployed! + +See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information. diff --git a/src/core/workbench/app/docs/auth.md b/src/core/workbench/app/docs/auth.md new file mode 100644 index 00000000..300b6b51 --- /dev/null +++ b/src/core/workbench/app/docs/auth.md @@ -0,0 +1,54 @@ +# Auth + +NOTE: Handling Auth on the client doesn't mean you shouldn't handle it on the server. In fact, it is more important to protect the resources on the server, but it should be handled on the client as well. + +There are 2 parts of Auth: + +## Authentication + +Authentication is a process of identifying who the user is. The most common way of authenticating users in single page applications is via [JWT](https://jwt.io/). During logging in / registration you receive a token that you store in your application, and then on each authenticated request you send the token in the header or via cookie along with the request. + +The safest option is to store the token in the app state, but if the user refreshes the app, it's token will be lost. + +That is why tokens are stored in `localStorage/sessionStorage` or in a cookie. + +#### `localStorage` vs cookie for storing tokens + +Storing it in `localStorage` could bring a security issue, if your application is vulnerable to [XSS](https://owasp.org/www-community/attacks/xss/) someone could steal your token. + +Storing tokens in cookie might be safer if the cookie is set to be `HttpOnly` which would mean it wouldn't be accessible from the client side JavaScript. The `localStorage` way is being used here for simplicity reasons, if you want to be more secure, you should consider using cookies but that is a decision that should be made together with the backend team. + +To keep the application safe, instead of focusing only on where to store the token safely, it would be recommended to make the entire application as resistant as possible to XSS attacks E.g - every input from the user should be sanitized before injected into the DOM. + +[HTML Sanitization Example Code](../src/components/Elements/MDPreview/MDPreview.tsx) + +#### Handling user data + +User info should be considered a global piece of state which should be available from anywhere in the application. +If you are already using `react-query`, you can use [react-query-auth](https://github.com/alan2207/react-query-auth) library for handling user state which will handle all the things for you after you provide it some configuration. Otherwise, you can use react context + hooks, or some 3rd party state management library. + +[Auth Configuration Example Code](../src/lib/auth.tsx) + +The application will assume the user is authenticated if a user object is present. + +[Authenticated Route Protection Example Code](../src/routes/index.tsx) + +## Authorization + +Authorization is a process of determining if the user is allowed to access a resource. + +#### RBAC (Role based access control) + +[Authorization Configuration Example Code](../src/lib/authorization.tsx) + +The most common method. Define allowed roles for a resource and then check if a user has the allowed role in order to access a resource. Good example is `USER` and `ADMIN` roles. You want to restrict some things for users and let admins access it. + +[RBAC Example Code](../src/features/discussions/components/CreateDiscussion.tsx) + +#### PBAC (Permission based access control) + +Sometimes RBAC is not enough. 
Some of the operations should be allowed only by the owner of the resource. For example user's comment - only the author of the comment should be able to delete it. That's why you might want to use PBAC, as it is more flexible. + +For RBAC protection you can use the `RBAC` component by passing allowed roles to it. On the other hand if you need more strict protection, you can pass policies check to it. + +[PBAC Example Code](../src/features/comments/components/CommentsList.tsx) diff --git a/src/core/workbench/app/docs/components-and-styling.md b/src/core/workbench/app/docs/components-and-styling.md new file mode 100644 index 00000000..b4c6b64e --- /dev/null +++ b/src/core/workbench/app/docs/components-and-styling.md @@ -0,0 +1,95 @@ +# Components And Styling + +## Components Best Practices + +#### Collocate things as close as possible to where it's being used + +Keep components, functions, styles, state, etc. as close as possible to the component where it's being used. + +#### Avoid nested rendering functions + +```javascript +// this is very difficult to maintain as soon as the component starts growing +function Component() { + function renderItems() { + return
<ul>...</ul>;
+  }
+  return <div>{renderItems()}</div>;
+}
+
+// extract it in a separate component
+import { Items } from 'components/Items';
+
+function Component() {
+  return (
+    <div>
+      <Items />
+    </div>
+ ); +} +``` + +#### Stay consistent + +Keep your code style consistent. e.g If you name your components by using pascal case, do it everywhere. If you create components as arrow functions, do it everywhere. + +#### Limit the number of props a component is accepting as input + +If your component accepts a lot of props you might consider splitting it into multiple components or use composition via children or slots. + +[Composition Example Code](../src/components/Elements/ConfirmationDialog/ConfirmationDialog.tsx) + +#### Abstract shared components into a component library + +For larger projects, it is a good idea to build abstractions around all the shared components. It makes the application more consistent and easier to maintain. Identify repetitions before creating the components to avoid wrong abstractions. + +[Component Library Example Code](../src/components/Elements/Button/Button.tsx) + +It is a good idea to wrap 3rd party components as well in order to adapt them to the application's needs. It might be easier to make the underlying changes in the future without affecting the application's functionality. + +[3rd Party Component Example Code](../src/components/Elements/Link/Link.tsx) + +## Component libraries + +Every project requires some UI components such as modals, tabs, sidebars, menus, etc. Instead of building those from scratch, you might want to use some of the existing, battle-tested component libraries. + +#### Fully featured component libraries: + +- [Chakra UI](https://chakra-ui.com/) - great library with probably the best developer experience, allows very fast prototyping with decent design defaults. Plenty of components that are very flexible with accessibility already configured out of the box. + +- [AntD](https://ant.design/) - another great component library that has a lot of different components. Best suitable for creating admin dashboards. However, it might be a bit difficult to change the styles in order to adapt it to a custom design. + +- [Material UI](https://material-ui.com/) - the most popular component library for React. Has a lot of different components. It might be more suitable for building admin dashboards as it would not be easy to change the components to look like something else than Material Design. + +#### Headless component libraries: + +If you have a specific design system from your designer, it might be easier and better solution to go with headless components that come unstyled than to adapt a fully featured library components such as Material UI to your needs. Some good options are: + +- [Reakit](https://reakit.io/) +- [Headless UI](https://headlessui.dev/) +- [Radix UI](https://www.radix-ui.com/) +- [react-aria](https://react-spectrum.adobe.com/react-aria/) + +## Styling libraries + +There are multiple ways to style a react application. 
Some good options are: + +- [tailwind](https://tailwindcss.com/) +- [styled-components](https://styled-components.com/) +- [emotion](https://emotion.sh/docs/introduction) +- [stitches](https://stitches.dev/) +- [vanilla-extract](https://github.com/seek-oss/vanilla-extract) +- [CSS modules](https://github.com/css-modules/css-modules) +- [linaria](https://github.com/callstack/linaria) + +## Good combinations + +- [Chakra UI](https://chakra-ui.com/) + [emotion](https://emotion.sh/docs/introduction) - The best choice for most applications +- [Headless UI](https://headlessui.dev/) + [tailwind](https://tailwindcss.com/) +- [Radix UI](https://www.radix-ui.com/) + [stitches](https://stitches.dev/) + +## Storybook + +[Storybook](https://storybook.js.org/) is a great tool for developing and testing components in isolation. Think of it as a catalogue of all the components your application is using. Very useful especially for larger projects because it helps exploring components. + +[Storybook Story Example Code](../src/components/Elements/Button/Button.stories.tsx) diff --git a/src/core/workbench/app/docs/error-handling.md b/src/core/workbench/app/docs/error-handling.md new file mode 100644 index 00000000..40596d8f --- /dev/null +++ b/src/core/workbench/app/docs/error-handling.md @@ -0,0 +1,17 @@ +# Error Handling + +### API Errors + +Set up an interceptor for handling errors. You might want to fire a notification toast to notify users that something went wrong. + +[API Errors Notification Example Code](../src/lib/axios.ts) + +### In App Errors + +Use error boundaries to handle errors that happen in the React tree. It is very popular to set only 1 single error boundary for the entire application, which would break the entire application when an error occurs. That's why you should have more error boundaries on more specific parts of the application. That way if an error occurs the app will still work without the need to restart it. + +[Error Boundary Example Code](../src/context/index.tsx) + +### Error Tracking + +You should track any errors that occur in production. Although it's possible to implement your own solution, it is a better idea to use tools like [Sentry](https://sentry.io/). It will report any issue that breaks the app. You will also be able to see on which platform, browser, etc. did it occur. Make sure to upload source maps to sentry to see where in your source code did the error happen. diff --git a/src/core/workbench/app/docs/forms.md b/src/core/workbench/app/docs/forms.md new file mode 100644 index 00000000..05047480 --- /dev/null +++ b/src/core/workbench/app/docs/forms.md @@ -0,0 +1,26 @@ +# Forms + +Forms are a very important component of almost every React application. They allow users to provide input data to applications. + +Depending on the application needs, they might be pretty complex with many different fields which require validation. + +Forms in React can be [controlled](https://reactjs.org/docs/uncontrolled-components.html) and [uncontrolled](https://reactjs.org/docs/forms.html#controlled-components). + +Although it is possible to build any form using only React, there are pretty good solutions out there that help with handling forms such as: + +- [React Hook Form](https://react-hook-form.com/) +- [Formik](https://formik.org/) +- [React Final Form](https://github.com/final-form/react-final-form) + +Create abstracted `Form` component and all the input field components that wrap the library functionality and are adapted to the application needs. 
You can reuse it then throughout the application. + +[Form Example Code](../src/components/Form/Form.tsx) + +[Input Field Example Code](../src/components/Form/InputField.tsx) + +You can also integrate validation libraries with the mentioned solutions to validate inputs on the client. Some good options are: + +- [zod](https://github.com/colinhacks/zod) +- [yup](https://github.com/jquense/yup) + +[Validation Example Code](../src/features/auth/components/RegisterForm.tsx) diff --git a/src/core/workbench/app/docs/performance.md b/src/core/workbench/app/docs/performance.md new file mode 100644 index 00000000..03491cc4 --- /dev/null +++ b/src/core/workbench/app/docs/performance.md @@ -0,0 +1,50 @@ +# Performance + +### Code Splitting + +Code splitting is a technique of splitting production js into smaller pieces, thus allowing the application to be only partially downloaded. Any unused code will not be downloaded until it is required by the application. + +Most of the time code splitting should be done on the routes level. + +Do not code split everything as it might even worsen your application's performance. + +[Code Splitting Example Code](../src/routes/index.tsx) + +### Component and state optimizations + +- Do not put everything in a single context. That might trigger unnecessary re-renders. Instead split the global state into multiple contexts. + +- Keep the state as close as possible to where it is being used. This will prevent re-rendering components that do not depend on the updated state. + +- If you have a piece of state that is initialized by an expensive computation, use the state initializer function instead of executing it directly because the expensive function will be run only once as it is supposed to. e.g: + +```javascript +// instead of this which would be executed on every re-render: +const [state, setState] = React.useState(myExpensiveFn()); + +// prefer this which is executed only once: +const [state, setState] = React.useState(() => myExpensiveFn()); +``` + +- If you develop an application that requires the state to track many elements at once, you might consider state management libraries with atomic updates such as [recoil](https://recoiljs.org/) or [jotai](https://jotai.pmnd.rs/). + +- If your application is expected to have frequent updates that might affect performance, consider switching from runtime styling solutions ([Chakra UI](https://chakra-ui.com/), [emotion](https://emotion.sh/docs/introduction), [styled-components](https://styled-components.com/) that generate styles during runtime) to zero runtime styling solutions ([tailwind](https://tailwindcss.com/), [linaria](https://github.com/callstack/linaria), [vanilla-extract](https://github.com/seek-oss/vanilla-extract), [CSS modules](https://github.com/css-modules/css-modules) which generate styles during build time). + +### Image optimizations + +Consider lazy loading images that are not in the viewport. + +Use modern image formats such as WEBP for faster image loading. + +### Web vitals + +Since Google started taking web vitals in account when indexing websites, you should keep an eye on web vitals scores from [Lighthouse](https://web.dev/measure/) and [Pagespeed Insights](https://developers.google.com/speed/pagespeed/insights/). + +### Deployment + +Deploy and serve your application and assets over a CDN. 
Good options for that are: + +- [Vercel](https://vercel.com/) +- [Netlify](https://www.netlify.com/) +- [AWS](https://aws.amazon.com/cloudfront/) +- [CloudFlare](https://www.cloudflare.com/en-gb/cdn/) diff --git a/src/core/workbench/app/docs/project-configuration.md b/src/core/workbench/app/docs/project-configuration.md new file mode 100644 index 00000000..7ee3b4d1 --- /dev/null +++ b/src/core/workbench/app/docs/project-configuration.md @@ -0,0 +1,59 @@ +# Project Configuration + +The application has been bootstrapped using `Create React App` for simplicity reasons. It allows us to create applications quickly without dealing with a complex tooling setup such as bundling, transpiling etc. + +You should always configure and use the following tools: + +#### ESLint + +ESLint is a linting tool for JavaScript. By providing specific configuration defined in the`.eslintrc.js` file it prevents developers from making silly mistakes in their code and enforces consistency in the codebase. + +[ESLint Configuration Example Code](../.eslintrc.js) + +#### Prettier + +This is a great tool for formatting code. It enforces a consistent code style across your entire codebase. By utilizing the "format on save" feature in our IDE you can automatically format the code based on the configuration provided in the `.prettierrc` file. It also gives us good feedback when something is wrong with the code. If it doesn't auto-format, something is wrong with the code. + +[Prettier Configuration Example Code](../.prettierrc) + +#### TypeScript + +ESLint is great for catching some of the bugs related to the language, but since JavaScript is a dynamic language ESLint cannot check data that run through the applications, which can lead to bugs, especially on larger projects. That is why TypeScript should be used. It is very useful during large refactors because it reports any issues we might have missed otherwise. When refactoring, change the type declaration first, then fix all the TypeScript errors throughout the project and you are done. One thing you should keep in mind is that TypeScript does not protect your application from failing during the runtime, it only does type checking during the build time, but it increases development confidence drastically anyways. Here is a [great resource on using TypeScript with React](https://react-typescript-cheatsheet.netlify.app/). + +#### Husky + +Husky is a tool for executing git hooks. Use Husky to run your code checking before every commit, thus making sure the code is in the best shape possible at any point of time and no faulty commits get into the repo. It can run linting, code formatting and type checking, etc. before it allows pushing the code. You can check how to configure it [here](https://typicode.github.io/husky/#/?id=usage). + +#### Absolute imports + +Absolute imports should always be configured and used because it makes it easier to move files around and avoid messy import paths such as `../../../Component`. Wherever you move the file, all the imports will remain intact. 
Here is how to configure it: + +For JavaScript projects: + +``` +// jsconfig.json +"compilerOptions": { + "baseUrl": ".", + "paths": { + "@/*": ["./src/*"] + } + } +``` + +For TypeScript projects: + +``` +// tsconfig.json +"compilerOptions": { + "baseUrl": ".", + "paths": { + "@/*": ["./src/*"] + } + } +``` + +[Paths Configuration Example Code](../tsconfig.paths.json) + +In this project we have to create another tsconfig file `tsconfig.paths.json` where we configure the paths and merge it with the base configuration, because CRA will override it otherwise. + +It is also possible to define multiple paths for various folders, but using `@/*` works very well because it is short enough so there is no need to configure multiple paths and it differs from other modules so there is no confusion in what comes from `node_modules` and what is our source files. That means that anything in the `src` folder can be accessed via `@`, e.g some file that lives in `src/components/MyComponent` can be accessed using `@/components/MyComponents`. \ No newline at end of file diff --git a/src/core/workbench/app/docs/project-structure.md b/src/core/workbench/app/docs/project-structure.md new file mode 100644 index 00000000..60fb7cb5 --- /dev/null +++ b/src/core/workbench/app/docs/project-structure.md @@ -0,0 +1,81 @@ +# Project Structure + +Most of the code lives in the `src` folder and looks like this: + +``` +src +| ++-- assets # assets folder can contain all the static data such as images, fonts, etc. +| ++-- components # shared components used across the entire application +| ++-- config # all the global configuration, env variables etc. get exported from here and used in the app +| ++-- context # all of the global contexts +| ++-- features # feature based modules +| ++-- hooks # shared hooks used across the entire application +| ++-- lib # re-exporting different libraries preconfigured for the application +| ++-- routes # routes configuration +| ++-- test # test utilities and mock server +| ++-- types # base types used accross the application +| ++-- utils # shared utility functions +``` + +In order to scale the application in the easiest and most maintainable way, keep most of the code inside the `features` folder, which should contain different feature-based things. Every `feature` folder should contain domain specific code for a specific feature. This will allow you to keep functionalities scoped to a feature and not mix it with the shared things. This is much easier to maintain than a flat folder structure with many files. + +A feature could have the following structure: + +``` +src/features/awesome-feature +| ++-- api # exported API request declarations related to the feature +| ++-- components # components scoped to the feature, not used anywhere else +| ++-- hooks # hooks scoped to the feature, not used anywhere else +| ++-- routes # route components for the given feature +| ++-- types # typescript types for the given feature +| ++-- utils # utility functions used only by the feature +| ++-- index.ts # entry point for the feature, it should serve as the public API of the given feature and exports everything that should be used outside the feature +``` + +A feature folder could also contain other features (if used only within the parent feature) or be kept separated, it's a matter of preference. + +Everything from a feature should be exported from the `index.ts` file which behaves as the public API of the feature. 
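To make the "public API" idea concrete, here is a minimal sketch of what such an `index.ts` barrel might look like; the `discussions` feature name and the re-exported modules are illustrative, not a prescription:

```ts
// src/features/discussions/index.ts (the feature's public API)
// Re-export only what the rest of the app is allowed to use.
export * from './api';        // e.g. request declarations and hooks
export * from './components'; // e.g. DiscussionsList
export * from './types';      // e.g. the Discussion type

// Anything not re-exported here (internal hooks, helpers, sub-components)
// stays private to the feature.
```

Consumers then import from the barrel rather than reaching into the feature's folders, which is exactly what the import convention and ESLint rule described below enforce.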
+ +You should import stuff from other features only by using: + +`import {AwesomeComponent} from "@/features/awesome-feature" `js + +and not + +`import {AwesomeComponent} from "@/features/awesome-feature/components/AwesomeComponent` + +This can also be configured in the ESLint configuration to disallow the later import by the following rule: + +``` +{ + rules: { + 'no-restricted-imports': [ + 'error', + { + patterns: ['@/features/*/*'], + }, + ], + + ...rest of the configuration +} +``` + +This was inspired by how [NX](https://nx.dev/) handles libraries that are isolated but available to be used by the other modules. Think of a feature as a library or a module that is self-contained but can expose different parts to other features via its entry point. diff --git a/src/core/workbench/app/docs/state-management.md b/src/core/workbench/app/docs/state-management.md new file mode 100644 index 00000000..f192e57b --- /dev/null +++ b/src/core/workbench/app/docs/state-management.md @@ -0,0 +1,36 @@ +# State Management + +We can split the state in several categories: + +#### UI State + +This is the state that controls interactive parts of an application. Opening modals, notifications, changing color mode, etc. For best performance and maintainability, keep the state as close as possible to the components that are using it. Don't make everything global out of the box. + +Good UI State Libraries: + +- [Context](https://reactjs.org/docs/context.html) + [hooks](https://reactjs.org/docs/hooks-intro.html) +- [zustand](https://github.com/pmndrs/zustand) +- [constate](https://github.com/diegohaz/constate) +- [redux](https://redux.js.org/) +- [mobx](https://mobx.js.org) +- [jotai](https://github.com/pmndrs/jotai) +- [recoil](https://recoiljs.org/) + +[UI State Example Code](../src/hooks/useNotificationStore.ts) + +#### Server Cache State + +This is the state that comes from the server which is being cached on the client for further usage. It is possible to store remote data inside a state management store such as redux, but there are better solutions for that. + +Good Server Cache Libraries: + +- [react-query](https://react-query.tanstack.com/) - REST + GraphQL +- [swr]() - REST + GraphQL +- [apollo client]() - GraphQL +- [urql]() - GraphQl + +[Server State Example Code](../src/features/discussions/hooks/useDiscussions.ts) + +#### URL State + +State that is being kept in the URL bar of the browser. Very useful for keeping pagination data of a list because even if the page gets refreshed, it will keep all the states in the URL bar so the user will see the same results. diff --git a/src/core/workbench/app/docs/testing.md b/src/core/workbench/app/docs/testing.md new file mode 100644 index 00000000..cb565f30 --- /dev/null +++ b/src/core/workbench/app/docs/testing.md @@ -0,0 +1,50 @@ +# Testing + +This [tweet](https://twitter.com/rauchg/status/807626710350839808) explains in a concise way how to think about testing. You will get the most benefit from having integration and e2e tests. Unit tests are fine, but they wouldn't give you as much confidence that your application is working as integration tests do. + +### Types of tests: + +#### Unit Tests + +Unit testing, as the naming already reveals is a type of testing where a unit of the application is being tested in isolation. +Write unit tests for shared components and functions that are used throughout the entire application as they might be used in different scenarios which might be difficult to reproduce in the integration tests. 
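As a rough illustration of the kind of unit test meant here (this is not code from this repo, and the `Button` component's props are assumed), a Jest + Testing Library test could look like this:

```tsx
import { render, screen } from '@testing-library/react';
import userEvent from '@testing-library/user-event';

import { Button } from '@/components/Elements/Button';

test('calls onClick when the button is pressed', async () => {
  const onClick = jest.fn();

  render(<Button onClick={onClick}>Save</Button>);

  // Query by accessible role and name, the way a real user would find it.
  await userEvent.click(screen.getByRole('button', { name: /save/i }));

  expect(onClick).toHaveBeenCalledTimes(1);
});
```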
+ +[Unit Test Example Code](../src/components/Elements/ConfirmationDialog/__tests__/ConfirmationDialog.test.tsx) + +#### Integration Tests + +Integration testing is a method of testing multiple parts of the application at once. +Most of your tests should be integration tests, as these will give you the most benefits and confidence for your invested effort. Unit tests on their own don't guarantee that your app will work even if those tests pass, because the relationship between the units might be wrong. You should test different features with integration tests. + +[Integration Test Example Code](../src/features/auth/components/__tests__/RegisterForm.test.tsx) + +#### E2E + +End-To-End Testing is a testing method where the application is tested as a complete entity. +Usually these tests consist of running the entire application with the frontend and the backend in an automated way and verifying that the entire system works. It is usually written in the way the application should be used by the user. + +[E2E Example Code](../cypress/integration/smoke.ts) + +### Tooling: + +#### [Jest](https://jestjs.io/) + +Jest is a fully featured testing framework and is the de-facto standard when it comes to testing JavaScript applications. It is very flexible and configurable to test both frontends and backends. + +#### [Testing Library](https://testing-library.com/) + +Testing library is a set of libraries and tools that makes testing easier than ever before. Its philosophy is to test your app in a way it is being used by a real world user instead of testing implementation details. For example, don't test what is the current state value in a component, but test what that component renders on the screen for the user. If you refactor your app to use a different state management solution, the tests will still be relevant as the actual component output to the user didn't change. + +#### [Cypress](https://www.cypress.io/) + +Cypress is a tool for running e2e tests in an automated way. +You define all the commands a real world user would execute when using the app and then start the test. It can be started in 2 modes: + +- Browser mode - it will start a dedicated browser and run your application from start to finish. You get a nice set of tools to visualize and inspect your application on each step. Since this is a more expensive option, you want to run it only locally when developing the application. +- Headless mode - it will start a headless browser and run your application. Very useful for integrating with CI/CD to run it on every deploy. + +It is very configurable with plugins and commands. You can even pair it with [Testing Library](https://testing-library.com/docs/cypress-testing-library/intro/) which is makes your tests even easier to write. + +You can also write custom commands to abstract some common tasks. 
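For instance, a login flow that several specs repeat could be wrapped in a command along these lines. This is only a sketch: the selectors mirror the smoke test above and the route is assumed, while the repo's real commands live in `cypress/support/commands.ts`:

```ts
// cypress/support/commands.ts (sketch of an extra command)
Cypress.Commands.add('login', (email: string, password: string) => {
  cy.visit('http://localhost:3000/auth/login');
  cy.findByRole('textbox', { name: /email address/i }).type(email);
  cy.findByLabelText(/password/i).type(password);
  cy.findByRole('button', { name: /log in/i }).click();
});

// cypress/global.d.ts (declaration merge so TypeScript knows about the command)
declare global {
  namespace Cypress {
    interface Chainable {
      login: (email: string, password: string) => void;
    }
  }
}
export {};
```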
+ +[Custom Cypress Commands Example Code](../cypress/support/commands.ts) diff --git a/src/core/workbench/app/generators/component/Component.stories.tsx.hbs b/src/core/workbench/app/generators/component/Component.stories.tsx.hbs new file mode 100644 index 00000000..a0504fef --- /dev/null +++ b/src/core/workbench/app/generators/component/Component.stories.tsx.hbs @@ -0,0 +1,18 @@ +import { Meta, Story } from '@storybook/react'; + +import { {{ properCase name }} } from './{{ properCase name }}'; + +const meta: Meta = { + title: 'Components/{{ properCase name }}', + component: {{ properCase name }}, + parameters: { + controls: { expanded: true }, + }, +}; + +export default meta; + +const Template: Story = (props) => <{{ properCase name }} {...props}>Hello; + +export const Default = Template.bind({}); +Default.args = {}; diff --git a/src/core/workbench/app/generators/component/Component.tsx.hbs b/src/core/workbench/app/generators/component/Component.tsx.hbs new file mode 100644 index 00000000..f13d5e89 --- /dev/null +++ b/src/core/workbench/app/generators/component/Component.tsx.hbs @@ -0,0 +1,10 @@ +import * as React from "react"; export type +{{properCase name}}Props = {} // eslint-disable-next-line @typescript-eslint/no-unused-vars export +const +{{properCase name}} += (props: +{{properCase name}}Props) => { return ( +
+ {{properCase name}} +
+); }; \ No newline at end of file diff --git a/src/core/workbench/app/generators/component/index.js b/src/core/workbench/app/generators/component/index.js new file mode 100644 index 00000000..f2a278ba --- /dev/null +++ b/src/core/workbench/app/generators/component/index.js @@ -0,0 +1,32 @@ +module.exports = { + description: 'Component Generator', + prompts: [ + { + type: 'input', + name: 'name', + message: 'component name', + }, + { + type: 'input', + name: 'folder', + message: 'folder in components', + }, + ], + actions: [ + { + type: 'add', + path: 'src/components/{{folder}}/{{properCase name}}/index.ts', + templateFile: 'generators/component/index.ts.hbs', + }, + { + type: 'add', + path: 'src/components/{{folder}}/{{properCase name}}/{{properCase name}}.tsx', + templateFile: 'generators/component/Component.tsx.hbs', + }, + { + type: 'add', + path: 'src/components/{{folder}}/{{properCase name}}/{{properCase name}}.stories.tsx', + templateFile: 'generators/component/Component.stories.tsx.hbs', + }, + ], +}; diff --git a/src/core/workbench/app/generators/component/index.ts.hbs b/src/core/workbench/app/generators/component/index.ts.hbs new file mode 100644 index 00000000..47762f8a --- /dev/null +++ b/src/core/workbench/app/generators/component/index.ts.hbs @@ -0,0 +1 @@ +export * from './{{ properCase name }}'; diff --git a/src/core/workbench/app/package.json b/src/core/workbench/app/package.json new file mode 100644 index 00000000..e74cba91 --- /dev/null +++ b/src/core/workbench/app/package.json @@ -0,0 +1,174 @@ +{ + "name": "datacoves", + "version": "0.4.0", + "private": true, + "dependencies": { + "@chakra-ui/icons": "^2.0.15", + "@chakra-ui/react": "^2.4.5", + "@craco/craco": "^7.0.0", + "@emotion/react": "^11.4.1", + "@emotion/styled": "^11.3.0", + "@headlessui/react": "^1.2.0", + "@heroicons/react": "^1.0.1", + "@hookform/resolvers": "^2.5.2", + "@sentry/react": "^7.107.0", + "@stripe/react-stripe-js": "^1.16.1", + "@stripe/stripe-js": "^1.46.0", + "@testing-library/jest-dom": "^5.16.5", + "@testing-library/react": "^13.4.0", + "@testing-library/user-event": "^14.4.3", + "@types/faker": "^5.5.3", + "@types/jest": "^26.0.15", + "@types/node": "^18.11.18", + "@types/react": "^18.0.26", + "@types/react-dom": "^18.0.10", + "axios": "^0.27.2", + "chakra-ui-markdown-renderer": "^4.1.0", + "clsx": "^1.1.1", + "date-fns": "^2.28.0", + "dayjs": "^1.10.6", + "dompurify": "^2.2.9", + "formik": "^2.2.9", + "formik-chakra-ui": "^1.6.1", + "framer-motion": "^6.5.1", + "history": "^5.3.0", + "intersection-observer": "^0.12.2", + "jsonwebtoken": "^9.0.0", + "lodash": "^4.17.21", + "marked": "^4.2.5", + "nanoid": "^3.1.23", + "react": "^18.2.0", + "react-dom": "^18.2.0", + "react-error-boundary": "^3.1.4", + "react-hook-form": "^7.41.2", + "react-icons": "^4.7.1", + "react-markdown": "^8.0.4", + "react-query": "^3.39.2", + "react-query-auth": "^1.1.0", + "react-router-dom": "^6.6.1", + "react-scripts": "^5.0.1", + "type-fest": "^1.2.0", + "typescript": "^4.4.3", + "web-vitals": "^3.1.0", + "yup": "^0.32.11", + "zod": "^3.20.2", + "zustand": "^4.1.5" + }, + "scripts": { + "start": "WDS_SOCKET_PORT=443 craco --openssl-legacy-provider start", + "start:cli": "cross-env BROWSER=none craco start", + "build": "craco --openssl-legacy-provider build", + "test": "is-ci \"test:coverage\" \"test:jest\"", + "test:jest": "craco --openssl-legacy-provider test", + "test:coverage": "CI=1 yarn test:jest -- --coverage", + "cy:install": "cypress install", + "cy:run": "cypress run", + "cy:open": "cypress open", + 
"test:e2e:dev": "start-server-and-test start:cli http://localhost:3000 cy:open", + "pretest:e2e:run": "yarn build", + "test:e2e:run": "start-server-and-test serve http://localhost:3000 cy:run", + "test:e2e": "is-ci \"test:e2e:run\" \"test:e2e:dev\"", + "serve": "serve --no-clipboard --single --listen 3000 build", + "eject": "react-scripts eject", + "lint": "eslint --fix --ext .js,.ts,.tsx ./src --ignore-path .gitignore", + "prettier": "prettier --ignore-path .gitignore --write \"**/*.+(js|json|ts|tsx)\"", + "format": "npm run prettier -- --write", + "check-types": "tsc --project tsconfig.json --pretty --noEmit", + "check-format": "npm run prettier -- --list-different", + "validate-and-build": "npm-run-all --parallel check-types check-format lint build", + "validate": "npm-run-all --parallel check-types && lint-staged", + "generate": "plop", + "storybook": "start-storybook -p 6006 -s public", + "build-storybook": "build-storybook -s public" + }, + "browserslist": { + "production": [ + ">0.2%", + "not dead", + "not op_mini all" + ], + "development": [ + "last 1 chrome version", + "last 1 firefox version", + "last 1 safari version" + ] + }, + "devDependencies": { + "@mswjs/data": "^0.3.0", + "@storybook/addon-actions": "^6.5.15", + "@storybook/addon-essentials": "^6.5.15", + "@storybook/addon-links": "^6.5.15", + "@storybook/node-logger": "^6.5.15", + "@storybook/preset-create-react-app": "^4.1.2", + "@storybook/react": "^6.5.15", + "@testing-library/cypress": "^9.0.0", + "@testing-library/react-hooks": "^8.0.1", + "@types/dompurify": "^2.2.2", + "@types/jsdom": "^16.2.14", + "@types/jsonwebtoken": "^8.5.1", + "@types/lodash": "^4.14.170", + "@types/marked": "^2.0.3", + "@typescript-eslint/eslint-plugin": "^5.6.0", + "@typescript-eslint/parser": "^5.6.0", + "autoprefixer": "^9", + "cross-env": "^7.0.3", + "cypress": "^12.2.0", + "eslint": "^7.32.0", + "eslint-config-prettier": "^8.3.0", + "eslint-import-resolver-typescript": "^2.4.0", + "eslint-plugin-cypress": "^2.11.3", + "eslint-plugin-import": "^2.23.4", + "eslint-plugin-jest-dom": "^3.9.0", + "eslint-plugin-jsx-a11y": "^6.4.1", + "eslint-plugin-prettier": "^3.4.0", + "eslint-plugin-react": "^7.24.0", + "eslint-plugin-react-hooks": "^4.2.0", + "eslint-plugin-testing-library": "^4.6.0", + "faker": "^5.5.3", + "husky": "^6.0.0", + "is-ci": "^3.0.0", + "is-ci-cli": "^2.2.0", + "lint-staged": "^11.0.0", + "msw": "^0.49.2", + "npm-run-all": "^4.1.5", + "plop": "^2.7.4", + "postcss": "^8.1.0", + "prettier": "^2.3.0", + "react-test-renderer": "^18.2.0", + "serve": "^14.1.2", + "start-server-and-test": "^1.12.5", + "tsconfig-paths-webpack-plugin": "^3.5.1", + "yarn-audit-fix": "^10.0.1" + }, + "msw": { + "workerDirectory": "public" + }, + "jest": { + "moduleNameMapper": { + "^@/(.+)": "/src/$1" + }, + "collectCoverageFrom": [ + "src/**/*.{js,jsx,ts,tsx}", + "!src/**/*.d.ts", + "!src/**/*.stories.{js,jsx,ts,tsx}", + "!src/test/**/*.{js,jsx,ts,tsx}" + ] + }, + "eslintConfig": { + "overrides": [ + { + "files": [ + "**/*.stories.*" + ], + "rules": { + "import/no-anonymous-default-export": "off" + } + } + ] + }, + "lint-staged": { + "*.+(js|ts|tsx)": [ + "yarn lint" + ] + } +} diff --git a/src/core/workbench/app/plopfile.js b/src/core/workbench/app/plopfile.js new file mode 100644 index 00000000..42cc3466 --- /dev/null +++ b/src/core/workbench/app/plopfile.js @@ -0,0 +1,5 @@ +const componentGenerator = require('./generators/component/index'); + +module.exports = function (plop) { + plop.setGenerator('component', componentGenerator); +}; diff --git 
a/src/core/workbench/app/public/favicon.ico b/src/core/workbench/app/public/favicon.ico new file mode 100644 index 00000000..12ff98b8 Binary files /dev/null and b/src/core/workbench/app/public/favicon.ico differ diff --git a/src/core/workbench/app/public/img/control-panel.jpeg b/src/core/workbench/app/public/img/control-panel.jpeg new file mode 100644 index 00000000..c1a9fd68 Binary files /dev/null and b/src/core/workbench/app/public/img/control-panel.jpeg differ diff --git a/src/core/workbench/app/public/img/cove.jpeg b/src/core/workbench/app/public/img/cove.jpeg new file mode 100644 index 00000000..7d5c2bf1 Binary files /dev/null and b/src/core/workbench/app/public/img/cove.jpeg differ diff --git a/src/core/workbench/app/public/img/dam.jpeg b/src/core/workbench/app/public/img/dam.jpeg new file mode 100644 index 00000000..36309d88 Binary files /dev/null and b/src/core/workbench/app/public/img/dam.jpeg differ diff --git a/src/core/workbench/app/public/img/pipelines.jpeg b/src/core/workbench/app/public/img/pipelines.jpeg new file mode 100644 index 00000000..00cfc96d Binary files /dev/null and b/src/core/workbench/app/public/img/pipelines.jpeg differ diff --git a/src/core/workbench/app/public/img/ship.jpeg b/src/core/workbench/app/public/img/ship.jpeg new file mode 100644 index 00000000..d2cf0c14 Binary files /dev/null and b/src/core/workbench/app/public/img/ship.jpeg differ diff --git a/src/core/workbench/app/public/index.html b/src/core/workbench/app/public/index.html new file mode 100644 index 00000000..3002d788 --- /dev/null +++ b/src/core/workbench/app/public/index.html @@ -0,0 +1,67 @@ + + + + + + + + + + + + + + + + + + Datacoves + + + +
+ + + diff --git a/src/core/workbench/app/public/logo192.png b/src/core/workbench/app/public/logo192.png new file mode 100644 index 00000000..b70afb5e Binary files /dev/null and b/src/core/workbench/app/public/logo192.png differ diff --git a/src/core/workbench/app/public/logo512.png b/src/core/workbench/app/public/logo512.png new file mode 100644 index 00000000..44f7a726 Binary files /dev/null and b/src/core/workbench/app/public/logo512.png differ diff --git a/src/core/workbench/app/public/manifest.json b/src/core/workbench/app/public/manifest.json new file mode 100644 index 00000000..ce16d62a --- /dev/null +++ b/src/core/workbench/app/public/manifest.json @@ -0,0 +1,26 @@ +{ + "short_name": "Datacoves", + "name": "Datacoves", + "description": "Analytics Workbench for the Modern Data Stack", + "display": "fullscreen", + "icons": [ + { + "src": "favicon.ico", + "sizes": "64x64 32x32 24x24 16x16", + "type": "image/x-icon" + }, + { + "src": "logo192.png", + "type": "image/png", + "sizes": "192x192" + }, + { + "src": "logo512.png", + "type": "image/png", + "sizes": "512x512" + } + ], + "start_url": ".", + "theme_color": "#00044A", + "background_color": "#00044A" +} diff --git a/src/core/workbench/app/public/mockServiceWorker.js b/src/core/workbench/app/public/mockServiceWorker.js new file mode 100644 index 00000000..70f0a2b9 --- /dev/null +++ b/src/core/workbench/app/public/mockServiceWorker.js @@ -0,0 +1,303 @@ +/* eslint-disable */ +/* tslint:disable */ + +/** + * Mock Service Worker (0.49.2). + * @see https://github.com/mswjs/msw + * - Please do NOT modify this file. + * - Please do NOT serve this file on production. + */ + +const INTEGRITY_CHECKSUM = '3d6b9f06410d179a7f7404d4bf4c3c70' +const activeClientIds = new Set() + +self.addEventListener('install', function () { + self.skipWaiting() +}) + +self.addEventListener('activate', function (event) { + event.waitUntil(self.clients.claim()) +}) + +self.addEventListener('message', async function (event) { + const clientId = event.source.id + + if (!clientId || !self.clients) { + return + } + + const client = await self.clients.get(clientId) + + if (!client) { + return + } + + const allClients = await self.clients.matchAll({ + type: 'window', + }) + + switch (event.data) { + case 'KEEPALIVE_REQUEST': { + sendToClient(client, { + type: 'KEEPALIVE_RESPONSE', + }) + break + } + + case 'INTEGRITY_CHECK_REQUEST': { + sendToClient(client, { + type: 'INTEGRITY_CHECK_RESPONSE', + payload: INTEGRITY_CHECKSUM, + }) + break + } + + case 'MOCK_ACTIVATE': { + activeClientIds.add(clientId) + + sendToClient(client, { + type: 'MOCKING_ENABLED', + payload: true, + }) + break + } + + case 'MOCK_DEACTIVATE': { + activeClientIds.delete(clientId) + break + } + + case 'CLIENT_CLOSED': { + activeClientIds.delete(clientId) + + const remainingClients = allClients.filter((client) => { + return client.id !== clientId + }) + + // Unregister itself when there are no more clients + if (remainingClients.length === 0) { + self.registration.unregister() + } + + break + } + } +}) + +self.addEventListener('fetch', function (event) { + const { request } = event + const accept = request.headers.get('accept') || '' + + // Bypass server-sent events. + if (accept.includes('text/event-stream')) { + return + } + + // Bypass navigation requests. + if (request.mode === 'navigate') { + return + } + + // Opening the DevTools triggers the "only-if-cached" request + // that cannot be handled by the worker. Bypass such requests. 
+ if (request.cache === 'only-if-cached' && request.mode !== 'same-origin') { + return + } + + // Bypass all requests when there are no active clients. + // Prevents the self-unregistered worked from handling requests + // after it's been deleted (still remains active until the next reload). + if (activeClientIds.size === 0) { + return + } + + // Generate unique request ID. + const requestId = Math.random().toString(16).slice(2) + + event.respondWith( + handleRequest(event, requestId).catch((error) => { + if (error.name === 'NetworkError') { + console.warn( + '[MSW] Successfully emulated a network error for the "%s %s" request.', + request.method, + request.url, + ) + return + } + + // At this point, any exception indicates an issue with the original request/response. + console.error( + `\ +[MSW] Caught an exception from the "%s %s" request (%s). This is probably not a problem with Mock Service Worker. There is likely an additional logging output above.`, + request.method, + request.url, + `${error.name}: ${error.message}`, + ) + }), + ) +}) + +async function handleRequest(event, requestId) { + const client = await resolveMainClient(event) + const response = await getResponse(event, client, requestId) + + // Send back the response clone for the "response:*" life-cycle events. + // Ensure MSW is active and ready to handle the message, otherwise + // this message will pend indefinitely. + if (client && activeClientIds.has(client.id)) { + ;(async function () { + const clonedResponse = response.clone() + sendToClient(client, { + type: 'RESPONSE', + payload: { + requestId, + type: clonedResponse.type, + ok: clonedResponse.ok, + status: clonedResponse.status, + statusText: clonedResponse.statusText, + body: + clonedResponse.body === null ? null : await clonedResponse.text(), + headers: Object.fromEntries(clonedResponse.headers.entries()), + redirected: clonedResponse.redirected, + }, + }) + })() + } + + return response +} + +// Resolve the main client for the given event. +// Client that issues a request doesn't necessarily equal the client +// that registered the worker. It's with the latter the worker should +// communicate with during the response resolving phase. +async function resolveMainClient(event) { + const client = await self.clients.get(event.clientId) + + if (client?.frameType === 'top-level') { + return client + } + + const allClients = await self.clients.matchAll({ + type: 'window', + }) + + return allClients + .filter((client) => { + // Get only those clients that are currently visible. + return client.visibilityState === 'visible' + }) + .find((client) => { + // Find the client ID that's recorded in the + // set of clients that have registered the worker. + return activeClientIds.has(client.id) + }) +} + +async function getResponse(event, client, requestId) { + const { request } = event + const clonedRequest = request.clone() + + function passthrough() { + // Clone the request because it might've been already used + // (i.e. its body has been read and sent to the client). + const headers = Object.fromEntries(clonedRequest.headers.entries()) + + // Remove MSW-specific request headers so the bypassed requests + // comply with the server's CORS preflight check. + // Operate with the headers as an object because request "Headers" + // are immutable. + delete headers['x-msw-bypass'] + + return fetch(clonedRequest, { headers }) + } + + // Bypass mocking when the client is not active. + if (!client) { + return passthrough() + } + + // Bypass initial page load requests (i.e. 
static assets). + // The absence of the immediate/parent client in the map of the active clients + // means that MSW hasn't dispatched the "MOCK_ACTIVATE" event yet + // and is not ready to handle requests. + if (!activeClientIds.has(client.id)) { + return passthrough() + } + + // Bypass requests with the explicit bypass header. + // Such requests can be issued by "ctx.fetch()". + if (request.headers.get('x-msw-bypass') === 'true') { + return passthrough() + } + + // Notify the client that a request has been intercepted. + const clientMessage = await sendToClient(client, { + type: 'REQUEST', + payload: { + id: requestId, + url: request.url, + method: request.method, + headers: Object.fromEntries(request.headers.entries()), + cache: request.cache, + mode: request.mode, + credentials: request.credentials, + destination: request.destination, + integrity: request.integrity, + redirect: request.redirect, + referrer: request.referrer, + referrerPolicy: request.referrerPolicy, + body: await request.text(), + bodyUsed: request.bodyUsed, + keepalive: request.keepalive, + }, + }) + + switch (clientMessage.type) { + case 'MOCK_RESPONSE': { + return respondWithMock(clientMessage.data) + } + + case 'MOCK_NOT_FOUND': { + return passthrough() + } + + case 'NETWORK_ERROR': { + const { name, message } = clientMessage.data + const networkError = new Error(message) + networkError.name = name + + // Rejecting a "respondWith" promise emulates a network error. + throw networkError + } + } + + return passthrough() +} + +function sendToClient(client, message) { + return new Promise((resolve, reject) => { + const channel = new MessageChannel() + + channel.port1.onmessage = (event) => { + if (event.data && event.data.error) { + return reject(event.data.error) + } + + resolve(event.data) + } + + client.postMessage(message, [channel.port2]) + }) +} + +function sleep(timeMs) { + return new Promise((resolve) => { + setTimeout(resolve, timeMs) + }) +} + +async function respondWithMock(response) { + await sleep(response.delay) + return new Response(response.body, response) +} diff --git a/src/core/workbench/app/public/robots.txt b/src/core/workbench/app/public/robots.txt new file mode 100644 index 00000000..2d1aa039 --- /dev/null +++ b/src/core/workbench/app/public/robots.txt @@ -0,0 +1,3 @@ +# https://www.robotstxt.org/robotstxt.html +User-agent: * +Disallow: / \ No newline at end of file diff --git a/src/core/workbench/app/src/App.tsx b/src/core/workbench/app/src/App.tsx new file mode 100644 index 00000000..ffc58ecc --- /dev/null +++ b/src/core/workbench/app/src/App.tsx @@ -0,0 +1,12 @@ +import { AppProvider } from './context'; +import { AppRoutes } from './routes'; + +function App() { + return ( + + + + ); +} + +export default App; diff --git a/src/core/workbench/app/src/__mocks__/zustand.ts b/src/core/workbench/app/src/__mocks__/zustand.ts new file mode 100644 index 00000000..dc4fc87e --- /dev/null +++ b/src/core/workbench/app/src/__mocks__/zustand.ts @@ -0,0 +1,20 @@ +import { act } from 'react-dom/test-utils'; +import actualCreate from 'zustand'; + +// a variable to hold reset functions for all stores declared in the app +const storeResetFns = new Set(); + +// when creating a store, we get its initial state, create a reset function and add it in the set +const create = (createState: any) => { + const store = actualCreate(createState); + const initialState = store.getState(); + storeResetFns.add(() => store.setState(initialState, true)); + return store; +}; + +// Reset all stores after each test run +afterEach(() => 
{ + act(() => storeResetFns.forEach((resetFn: any) => resetFn())); +}); + +export default create; diff --git a/src/core/workbench/app/src/assets/404.svg b/src/core/workbench/app/src/assets/404.svg new file mode 100644 index 00000000..04205544 --- /dev/null +++ b/src/core/workbench/app/src/assets/404.svg @@ -0,0 +1,24 @@ + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/core/workbench/app/src/assets/500.svg b/src/core/workbench/app/src/assets/500.svg new file mode 100644 index 00000000..cdcaa1f9 --- /dev/null +++ b/src/core/workbench/app/src/assets/500.svg @@ -0,0 +1,379 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/src/core/workbench/app/src/assets/animated_logo_no_bg_small_200_15fps.gif b/src/core/workbench/app/src/assets/animated_logo_no_bg_small_200_15fps.gif new file mode 100644 index 00000000..5afef322 Binary files /dev/null and b/src/core/workbench/app/src/assets/animated_logo_no_bg_small_200_15fps.gif differ diff --git a/src/core/workbench/app/src/assets/logo.svg b/src/core/workbench/app/src/assets/logo.svg new file mode 100644 index 00000000..4fb94e36 --- /dev/null +++ b/src/core/workbench/app/src/assets/logo.svg @@ -0,0 +1,5 @@ + + + \ No newline at end of file diff --git a/src/core/workbench/app/src/assets/logo_navyblue.svg b/src/core/workbench/app/src/assets/logo_navyblue.svg new file mode 100644 index 00000000..e7fd17d4 --- /dev/null +++ b/src/core/workbench/app/src/assets/logo_navyblue.svg @@ -0,0 +1,4 @@ + + + + diff --git a/src/core/workbench/app/src/components/AdminLayout/components/BasePage.tsx b/src/core/workbench/app/src/components/AdminLayout/components/BasePage.tsx new file mode 100644 index 00000000..e1689fcf --- /dev/null +++ b/src/core/workbench/app/src/components/AdminLayout/components/BasePage.tsx @@ -0,0 +1,28 @@ +import { Flex, useColorModeValue as mode } from '@chakra-ui/react'; + +import { Breadcrumb } from '../../Breadcrumb'; +import { Header } from '../../Header'; +import PageSidebarContainer from '../../Sidebar/PageSidebarContainer'; + +import { PageContent } from './PageContent'; +import { PageContentNoBox } from './PageContentNoBox'; +import { PageHeader } from './PageHeader'; + +export const BasePage = (props: any) => { + return ( + +
+ + {props.header && } + + + + {props.noBox ? ( + {props.children} + ) : ( + {props.children} + )} + + + ); +}; diff --git a/src/core/workbench/app/src/components/AdminLayout/components/PageContent.tsx b/src/core/workbench/app/src/components/AdminLayout/components/PageContent.tsx new file mode 100644 index 00000000..e1a8fdb1 --- /dev/null +++ b/src/core/workbench/app/src/components/AdminLayout/components/PageContent.tsx @@ -0,0 +1,13 @@ +import { Box, Container, useColorModeValue } from '@chakra-ui/react'; + +export const PageContent = (props: any) => { + return ( + + + + {props.children} + + + + ); +}; diff --git a/src/core/workbench/app/src/components/AdminLayout/components/PageContentNoBox.tsx b/src/core/workbench/app/src/components/AdminLayout/components/PageContentNoBox.tsx new file mode 100644 index 00000000..26022be4 --- /dev/null +++ b/src/core/workbench/app/src/components/AdminLayout/components/PageContentNoBox.tsx @@ -0,0 +1,14 @@ +import { Box, Container } from '@chakra-ui/react'; +import React from 'react'; + +export const PageContentNoBox = (props: any) => { + return ( + + + + {props.children} + + + + ); +}; diff --git a/src/core/workbench/app/src/components/AdminLayout/components/PageHeader.tsx b/src/core/workbench/app/src/components/AdminLayout/components/PageHeader.tsx new file mode 100644 index 00000000..02554890 --- /dev/null +++ b/src/core/workbench/app/src/components/AdminLayout/components/PageHeader.tsx @@ -0,0 +1,11 @@ +import { Box, Container, Heading, useColorModeValue } from '@chakra-ui/react'; + +export const PageHeader = (props: any) => ( + + + + {props.header} + + + +); diff --git a/src/core/workbench/app/src/components/AdminLayout/components/TablePagination.tsx b/src/core/workbench/app/src/components/AdminLayout/components/TablePagination.tsx new file mode 100644 index 00000000..aaf829cd --- /dev/null +++ b/src/core/workbench/app/src/components/AdminLayout/components/TablePagination.tsx @@ -0,0 +1,29 @@ +import { Button, ButtonGroup, Flex, Text, useColorModeValue as mode } from '@chakra-ui/react'; +import React from 'react'; + +interface TablePaginationProps { + objectName: string; + total?: number; + prevLink?: string; + prevHandler: () => void; + nextLink?: string; + nextHandler: () => void; +} + +export const TablePagination = (props: TablePaginationProps) => { + return ( + + + {props.total === 1 ? 
`1 ${props.objectName}` : `${props.total} ${props.objectName}s`} + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/components/AdminLayout/components/index.ts b/src/core/workbench/app/src/components/AdminLayout/components/index.ts new file mode 100644 index 00000000..f3880bbe --- /dev/null +++ b/src/core/workbench/app/src/components/AdminLayout/components/index.ts @@ -0,0 +1,2 @@ +export * from './BasePage'; +export * from './PageContent'; diff --git a/src/core/workbench/app/src/components/AdminLayout/hooks/index.ts b/src/core/workbench/app/src/components/AdminLayout/hooks/index.ts new file mode 100644 index 00000000..036717e1 --- /dev/null +++ b/src/core/workbench/app/src/components/AdminLayout/hooks/index.ts @@ -0,0 +1 @@ +export * from './useMobileMenuState'; diff --git a/src/core/workbench/app/src/components/AdminLayout/hooks/useMobileMenuState.tsx b/src/core/workbench/app/src/components/AdminLayout/hooks/useMobileMenuState.tsx new file mode 100644 index 00000000..436076a0 --- /dev/null +++ b/src/core/workbench/app/src/components/AdminLayout/hooks/useMobileMenuState.tsx @@ -0,0 +1,19 @@ +import { useBoolean, useBreakpointValue } from '@chakra-ui/react'; +import * as React from 'react'; + +export const useMobileMenuState = () => { + const [isMenuOpen, actions] = useBoolean(); + /** + * Scenario: Menu is open on mobile, and user resizes to desktop/tablet viewport. + * Result: We'll close the menu + */ + const isMobileBreakpoint = useBreakpointValue({ base: true, lg: false }); + + React.useEffect(() => { + if (isMobileBreakpoint == false) { + actions.off(); + } + }, [isMobileBreakpoint, actions]); + + return { isMenuOpen, ...actions }; +}; diff --git a/src/core/workbench/app/src/components/AdminLayout/index.ts b/src/core/workbench/app/src/components/AdminLayout/index.ts new file mode 100644 index 00000000..f76fd6f1 --- /dev/null +++ b/src/core/workbench/app/src/components/AdminLayout/index.ts @@ -0,0 +1,2 @@ +export * from './components'; +export * from './hooks'; diff --git a/src/core/workbench/app/src/components/AlertDialog.tsx b/src/core/workbench/app/src/components/AlertDialog.tsx new file mode 100644 index 00000000..b6d8e7c0 --- /dev/null +++ b/src/core/workbench/app/src/components/AlertDialog.tsx @@ -0,0 +1,67 @@ +import { + Button, + AlertDialog as AlertDialogChakra, + AlertDialogBody, + AlertDialogFooter, + AlertDialogHeader, + AlertDialogContent, + AlertDialogOverlay, +} from '@chakra-ui/react'; +import * as React from 'react'; +import { useState } from 'react'; + +type IAlertDialogProps = { + isOpen: boolean; + header: string; + message: any; + confirmLabel: string; + onClose: () => void; + onConfirm?: () => void; + isLoadingOnSubmit?: boolean; + confirmColor?: string; +}; +export const AlertDialog = ({ + isOpen, + header, + message, + confirmLabel, + onClose, + onConfirm, + isLoadingOnSubmit = false, + confirmColor = 'red', +}: IAlertDialogProps) => { + const [isSubmitting, setIsSubmitting] = useState(false); + const onSubmit = () => { + if (isLoadingOnSubmit) { + setIsSubmitting(true); + onConfirm?.(); + } else { + onClose(); + onConfirm?.(); + } + }; + const cancelRef = React.useRef() as React.MutableRefObject; + + return ( + + + + + {header} + + + {message} + + + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/components/Breadcrumb.tsx b/src/core/workbench/app/src/components/Breadcrumb.tsx new file mode 100644 index 00000000..47a67639 --- /dev/null +++ b/src/core/workbench/app/src/components/Breadcrumb.tsx @@ -0,0 +1,55 @@ +import { ChevronRightIcon } 
from '@chakra-ui/icons'; +import { + Breadcrumb as Breadcrumbs, + BreadcrumbItem, + BreadcrumbLink, + Container, +} from '@chakra-ui/react'; +import { Link, useLocation } from 'react-router-dom'; + +import { getSiteContext } from '../utils/siteContext'; + +type TRoute = { + name: string; + link: string; +}; + +function getRoutes(pathname: string): TRoute[] { + const routes: TRoute[] = []; + const parts = pathname.split('/').slice(1); + parts.forEach((item, index) => { + if (item !== 'admin' && item !== 'edit') { + routes.push({ + name: item, + link: '/' + parts.slice(0, index + 1).join('/'), + }); + } + }); + return routes; +} + +export const Breadcrumb = (props: any) => { + const { pathname } = useLocation(); + const routes = getRoutes(pathname); + const siteContext = getSiteContext(); + const root = siteContext.isWorkbench ? 'workbench' : 'launchpad'; + + return ( + + }> + + + {root} + + + {routes.map((route) => ( + + + {route.name} + + + ))} + + + ); +}; diff --git a/src/core/workbench/app/src/components/Card.tsx b/src/core/workbench/app/src/components/Card.tsx new file mode 100644 index 00000000..b82a5733 --- /dev/null +++ b/src/core/workbench/app/src/components/Card.tsx @@ -0,0 +1,12 @@ +import { Box, BoxProps, useColorModeValue } from '@chakra-ui/react'; +import * as React from 'react'; + +export const Card = (props: BoxProps) => ( + +); diff --git a/src/core/workbench/app/src/components/DatacovesSpinner.tsx b/src/core/workbench/app/src/components/DatacovesSpinner.tsx new file mode 100644 index 00000000..7bb35909 --- /dev/null +++ b/src/core/workbench/app/src/components/DatacovesSpinner.tsx @@ -0,0 +1,8 @@ +import { chakra, ImageProps, forwardRef } from '@chakra-ui/react'; +import React from 'react'; + +import animatedLogo from '../assets/animated_logo_no_bg_small_200_15fps.gif'; + +export const DatacovesSpinner = forwardRef((props, ref) => { + return ; +}); diff --git a/src/core/workbench/app/src/components/Error/404.tsx b/src/core/workbench/app/src/components/Error/404.tsx new file mode 100644 index 00000000..0f822f55 --- /dev/null +++ b/src/core/workbench/app/src/components/Error/404.tsx @@ -0,0 +1,19 @@ +import { Flex } from '@chakra-ui/react'; + +import icon from '../../assets/404.svg'; +import { Header } from '../Header'; + +import { ErrorPage } from './ErrorPage'; +export const Error404Page = () => { + return ( + +
+ + + ); +}; diff --git a/src/core/workbench/app/src/components/Error/500.tsx b/src/core/workbench/app/src/components/Error/500.tsx new file mode 100644 index 00000000..f6c9e6a2 --- /dev/null +++ b/src/core/workbench/app/src/components/Error/500.tsx @@ -0,0 +1,73 @@ +import { Flex, Heading, Button, Text, Box, Link, Image } from '@chakra-ui/react'; + +import icon from '../../assets/500.svg'; +import logo_navyblue from '../../assets/logo_navyblue.svg'; +import { getSiteContext } from '../../utils/siteContext'; +import { HeaderOffline } from '../Header/HeaderOffline'; + +export const Error500Page = () => { + const siteContext = getSiteContext(); + const logoUrl = `https://${siteContext.launchpadHost}`; + return ( + <> + + + + + DataCoves Logo + + + + + + Error Icon + + + + + + + 500 + + + Internal Server Error + + + + + Something isn’t done loading + + + If this continues,{' '} + + contact support + + + + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/components/Error/Error.tsx b/src/core/workbench/app/src/components/Error/Error.tsx new file mode 100644 index 00000000..409cf324 --- /dev/null +++ b/src/core/workbench/app/src/components/Error/Error.tsx @@ -0,0 +1,27 @@ +import './error.css'; + +import { ChakraProvider } from '@chakra-ui/react'; + +import { UIProvider } from '../../context/UIProvider'; +import { main } from '../../themes'; + +import { Error500Page } from './500'; +import NotAuthorized from './NotAuthorized'; + +export function ErrorFallback({ error }: any) { + if ( + error.message.toString().lastIndexOf('code 403') !== -1 || + error.message.toString().lastIndexOf('code 401') !== -1 + ) { + return ( + + + + ); + } else + return ( + + + + ); +} diff --git a/src/core/workbench/app/src/components/Error/ErrorPage.tsx b/src/core/workbench/app/src/components/Error/ErrorPage.tsx new file mode 100644 index 00000000..9e518ff0 --- /dev/null +++ b/src/core/workbench/app/src/components/Error/ErrorPage.tsx @@ -0,0 +1,62 @@ +import { Box, Heading, Flex, Image, Text, Button } from '@chakra-ui/react'; + +import logo_navyblue from '../../assets/logo_navyblue.svg'; +import { getSiteContext } from '../../utils/siteContext'; +export const ErrorPage = ({ svg, title, subtitle, body }: any) => { + const siteContext = getSiteContext(); + const logoUrl = `https://${siteContext.launchpadHost}`; + return ( + + + + DataCoves Logo + + + + + + Error Icon + + + + + + + {title} + + + {subtitle} + + + + {body.map((line: string, index: number) => ( + + {line} + + ))} + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/components/Error/NotAuthorized.tsx b/src/core/workbench/app/src/components/Error/NotAuthorized.tsx new file mode 100644 index 00000000..51349c3d --- /dev/null +++ b/src/core/workbench/app/src/components/Error/NotAuthorized.tsx @@ -0,0 +1,33 @@ +import './error.css'; +import { Box, Button, Container, Flex, Heading, Stack, Text } from '@chakra-ui/react'; + +import { Header } from '../Header'; +import PageSidebarContainer from '../Sidebar/PageSidebarContainer'; + +const NotAuthorized = ({ error }: any) => ( + +
+ + + + + + Action not authorized + + {error?.response.data.detail ?? + "You don't have enough permissions to access this feature."} + + + + + + + + + + +); + +export default NotAuthorized; diff --git a/src/core/workbench/app/src/components/Error/error.css b/src/core/workbench/app/src/components/Error/error.css new file mode 100644 index 00000000..f36f3980 --- /dev/null +++ b/src/core/workbench/app/src/components/Error/error.css @@ -0,0 +1,461 @@ + +.main-error-page { + min-height: 600px; + margin: 40px auto; + width: auto; + max-width: 560px; + display: flex; + align-items: center; + justify-content: center; + flex-direction: column; +} + +.error-title { + max-width: 529px; + font-size: 38px; + font-weight: bold; + font-stretch: normal; + font-style: normal; + line-height: normal; + letter-spacing: normal; + text-align: center; + color: #4b4b62; + margin-bottom: 16px; +} + +.error-subtitle { + max-width: 568px; + font-size: 16px; + font-weight: normal; + font-stretch: normal; + font-style: normal; + line-height: 1.31; + letter-spacing: normal; + text-align: center; + /* color: #4b4b62; */ + margin-bottom: 24px; +} + +.pao-atras { + animation: leftright 1s alternate infinite; + transform-origin: center; +} + +.pao-frente { + animation: leftright 1s 0.3s alternate infinite; + transform-origin: center; +} + +.olho-esq { + animation: sad 2s alternate infinite; + transform-origin: center; +} + +.olho-dir { + animation: sad 2s alternate infinite; + transform-origin: center; +} + +.boca { + animation: sad 2s alternate infinite; + transform-origin: center; +} + +.raios { + -webkit-animation: flicker-4 4s linear infinite both; + animation: flicker-4 4s linear infinite both; +} + +.tomada { + -webkit-animation: vibrate-1 3s linear infinite both; + animation: vibrate-1 3s linear infinite both; +} + +.fio-500 { + -webkit-animation: vibrate-1 3s linear infinite both; + animation: vibrate-1 3s linear infinite both; +} + +.fio { + -webkit-animation: vibrate-1 3s linear infinite both; + animation: vibrate-1 3s linear infinite both; +} + +@keyframes scales { + from { + transform: scale(0.98); + } + + to { + transform: scale(1); + } +} + +/* ---------------------------------------------- + * Generated by Animista on 2020-4-1 14:58:16 + * Licensed under FreeBSD License. + * See http://animista.net/license for more info. 
+ * w: http://animista.net, t: @cssanimista + * ---------------------------------------------- */ + +/** + * ---------------------------------------- + * animation flicker-4 + * ---------------------------------------- + */ +@-webkit-keyframes flicker-4 { + + 0%, + 100% { + opacity: 1; + } + + 31.98% { + opacity: 1; + } + + 32% { + opacity: 0; + } + + 32.8% { + opacity: 0; + } + + 32.82% { + opacity: 1; + } + + 34.98% { + opacity: 1; + } + + 35% { + opacity: 0; + } + + 35.7% { + opacity: 0; + } + + 35.72% { + opacity: 1; + } + + 36.98% { + opacity: 1; + } + + 37% { + opacity: 0; + } + + 37.6% { + opacity: 0; + } + + 37.62% { + opacity: 1; + } + + 67.98% { + opacity: 1; + } + + 68% { + opacity: 0; + } + + 68.4% { + opacity: 0; + } + + 68.42% { + opacity: 1; + } + + 95.98% { + opacity: 1; + } + + 96% { + opacity: 0; + } + + 96.7% { + opacity: 0; + } + + 96.72% { + opacity: 1; + } + + 98.98% { + opacity: 1; + } + + 99% { + opacity: 0; + } + + 99.6% { + opacity: 0; + } + + 99.62% { + opacity: 1; + } +} + +@keyframes flicker-4 { + + 0%, + 100% { + opacity: 1; + } + + 31.98% { + opacity: 1; + } + + 32% { + opacity: 0; + } + + 32.8% { + opacity: 0; + } + + 32.82% { + opacity: 1; + } + + 34.98% { + opacity: 1; + } + + 35% { + opacity: 0; + } + + 35.7% { + opacity: 0; + } + + 35.72% { + opacity: 1; + } + + 36.98% { + opacity: 1; + } + + 37% { + opacity: 0; + } + + 37.6% { + opacity: 0; + } + + 37.62% { + opacity: 1; + } + + 67.98% { + opacity: 1; + } + + 68% { + opacity: 0; + } + + 68.4% { + opacity: 0; + } + + 68.42% { + opacity: 1; + } + + 95.98% { + opacity: 1; + } + + 96% { + opacity: 0; + } + + 96.7% { + opacity: 0; + } + + 96.72% { + opacity: 1; + } + + 98.98% { + opacity: 1; + } + + 99% { + opacity: 0; + } + + 99.6% { + opacity: 0; + } + + 99.62% { + opacity: 1; + } +} + + +/* ---------------------------------------------- + * Generated by Animista on 2020-4-1 15:17:57 + * Licensed under FreeBSD License. + * See http://animista.net/license for more info. + * w: http://animista.net, t: @cssanimista + * ---------------------------------------------- */ + +/** + * ---------------------------------------- + * animation vibrate-1 + * ---------------------------------------- + */ +@-webkit-keyframes vibrate-1 { + 0% { + -webkit-transform: translate(0); + transform: translate(0); + } + + 20% { + -webkit-transform: translate(-2px, 2px); + transform: translate(-2px, 2px); + } + + 40% { + -webkit-transform: translate(-2px, -2px); + transform: translate(-2px, -2px); + } + + 60% { + -webkit-transform: translate(2px, 2px); + transform: translate(2px, 2px); + } + + 80% { + -webkit-transform: translate(2px, -2px); + transform: translate(2px, -2px); + } + + 100% { + -webkit-transform: translate(0); + transform: translate(0); + } +} + +@keyframes vibrate-1 { + 0% { + -webkit-transform: translate(0); + transform: translate(0); + } + + 20% { + -webkit-transform: translate(-2px, 2px); + transform: translate(-2px, 2px); + } + + 40% { + -webkit-transform: translate(-2px, -2px); + transform: translate(-2px, -2px); + } + + 60% { + -webkit-transform: translate(2px, 2px); + transform: translate(2px, 2px); + } + + 80% { + -webkit-transform: translate(2px, -2px); + transform: translate(2px, -2px); + } + + 100% { + -webkit-transform: translate(0); + transform: translate(0); + } +} + + + +/* ---------------------------------------------- + * Generated by Animista on 2020-4-1 15:42:45 + * Licensed under FreeBSD License. + * See http://animista.net/license for more info. 
+ * w: http://animista.net, t: @cssanimista + * ---------------------------------------------- */ + +/** + * ---------------------------------------- + * animation wobble-ver-right + * ---------------------------------------- + */ +@-webkit-keyframes wobble-ver-right { + + 0%, + 100% { + -webkit-transform: translateY(0) rotate(0); + transform: translateY(0) rotate(0); + -webkit-transform-origin: 50% 50%; + transform-origin: 50% 50%; + } + + 15% { + -webkit-transform: translateY(-30px) rotate(6deg); + transform: translateY(-30px) rotate(6deg); + } + + 30% { + -webkit-transform: translateY(15px) rotate(-6deg); + transform: translateY(15px) rotate(-6deg); + } + + 45% { + -webkit-transform: translateY(-15px) rotate(3.6deg); + transform: translateY(-15px) rotate(3.6deg); + } + + 60% { + -webkit-transform: translateY(9px) rotate(-2.4deg); + transform: translateY(9px) rotate(-2.4deg); + } + + 75% { + -webkit-transform: translateY(-6px) rotate(1.2deg); + transform: translateY(-6px) rotate(1.2deg); + } +} + + +@keyframes sad { + 0% { + transform: rotateX(0deg) rotateY(0deg); + } + + 100% { + transform: rotateX(10deg) rotateY(5deg); + } +} + +@keyframes leftright { + + 0% { + transform: rotateZ(0deg) + } + + 100% { + transform: rotateZ(-15deg) + } +} \ No newline at end of file diff --git a/src/core/workbench/app/src/components/Error/index.ts b/src/core/workbench/app/src/components/Error/index.ts new file mode 100644 index 00000000..ae6e95d0 --- /dev/null +++ b/src/core/workbench/app/src/components/Error/index.ts @@ -0,0 +1 @@ +export * from './Error'; diff --git a/src/core/workbench/app/src/components/ExpandibleToast/ExpandibleToast.tsx b/src/core/workbench/app/src/components/ExpandibleToast/ExpandibleToast.tsx new file mode 100644 index 00000000..f1368c5a --- /dev/null +++ b/src/core/workbench/app/src/components/ExpandibleToast/ExpandibleToast.tsx @@ -0,0 +1,70 @@ +import { Box, Text, Collapse, Button, HStack } from '@chakra-ui/react'; +import { useState } from 'react'; +import { AiOutlineCheckCircle, AiOutlineWarning, AiOutlineExclamationCircle } from 'react-icons/ai'; + +type ToastStatus = 'info' | 'success' | 'warning' | 'error'; + +interface CustomToastProps { + message: string; + extra?: string; + status?: ToastStatus; +} + +const statusColors = { + info: 'blue.500', + success: 'green.500', + warning: 'orange.500', + error: 'red.500', +}; + +const statusBgColors = { + info: 'blue.50', + success: 'green.50', + warning: 'orange.50', + error: 'red.50', +}; + +const statusIcons = { + info: , + success: , + warning: , + error: , +}; + +const ExpandibleToast: React.FC = ({ message, extra, status = 'info' }) => { + const [isOpen, setIsOpen] = useState(false); + const toggle = () => setIsOpen(!isOpen); + + return ( + + + {statusIcons[status]} {/* Dynamically render the icon based on status */} + + {message} + + + {extra && ( + <> + + + + {extra} + + + + )} + + ); +}; + +export default ExpandibleToast; diff --git a/src/core/workbench/app/src/components/FieldGroup.tsx b/src/core/workbench/app/src/components/FieldGroup.tsx new file mode 100644 index 00000000..846838da --- /dev/null +++ b/src/core/workbench/app/src/components/FieldGroup.tsx @@ -0,0 +1,45 @@ +import { Box, Heading, Stack, StackProps } from '@chakra-ui/react'; +import * as React from 'react'; + +interface FieldGroupProps extends StackProps { + title?: string; + isTable?: boolean; +} + +export const FieldGroup = (props: FieldGroupProps) => { + const { title, isTable, children, ...flexProps } = props; + return ( + <> + {isTable ? 
( + + + {title && ( + + {title} + + )} + + {children} + + ) : ( + + + {title && ( + + {title} + + )} + + {children} + + )} + + ); +}; diff --git a/src/core/workbench/app/src/components/FormTabs.tsx b/src/core/workbench/app/src/components/FormTabs.tsx new file mode 100644 index 00000000..5369e146 --- /dev/null +++ b/src/core/workbench/app/src/components/FormTabs.tsx @@ -0,0 +1,90 @@ +import { ChevronDownIcon } from '@chakra-ui/icons'; +import { Box, Collapse, Flex, Heading, Tab, TabList } from '@chakra-ui/react'; +import * as React from 'react'; +import { useState } from 'react'; + +interface FormTabsProps { + labels: Array }>; + isSecondary?: boolean; + activeTab?: number; + startIndex?: number; +} + +interface TabItemProps { + label: string; + isSecondary?: boolean; + isDisabled?: boolean; +} + +interface MenuSubitemProps { + labels: { [key: string]: Array }; + startIndex?: number; + activeTab?: number; +} + +export const FormTabs = (props: FormTabsProps) => { + const { labels, isSecondary, activeTab, startIndex } = props; + return ( + + {labels.map((label, index) => ( + + {typeof label == 'string' ? ( + activeTab, + })} + /> + ) : ( + typeof label == 'object' && ( + + ) + )} + + ))} + + ); +}; + +const TabItem = ({ label, isSecondary, isDisabled }: TabItemProps) => ( + + {label} + +); + +const MenuSubitem = ({ labels, startIndex, activeTab }: MenuSubitemProps) => { + const [isActive, setIsActive] = useState(true); + const label = Object.keys(labels)[0]; + const items = labels[label]; + return ( + <> + setIsActive((prev) => !prev)} + justifyContent="space-between" + alignItems="center" + p={4} + cursor="pointer" + {...(startIndex !== undefined && + activeTab !== undefined && + startIndex > activeTab && { opacity: 0.4 })} + > + {label} + + + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/components/Forms/FormikCheckbox.tsx b/src/core/workbench/app/src/components/Forms/FormikCheckbox.tsx new file mode 100644 index 00000000..2e29a01a --- /dev/null +++ b/src/core/workbench/app/src/components/Forms/FormikCheckbox.tsx @@ -0,0 +1,30 @@ +import { FormControl, FormErrorMessage } from '@chakra-ui/form-control'; +import { Checkbox, Text } from '@chakra-ui/react'; +import { useField } from 'formik'; +import React, { FC } from 'react'; + +type NewType = JSX.IntrinsicElements['input']; + +interface ICustomFieldProps extends NewType { + label?: string; + name: string; + value?: string; + disabled?: boolean; + hint?: string; +} + +// TODO: Deprecated in favor of 'formik-chakra-ui' +export const FormikCheckbox: FC = ({ label, ...props }) => { + const [field, meta] = useField({ ...props, type: 'checkbox' }); + return ( + + + {label} + + + {props.hint} + + {meta.touched && meta.error ? 
{meta.error} : null} + + ); +}; diff --git a/src/core/workbench/app/src/components/Forms/FormikIcon.tsx b/src/core/workbench/app/src/components/Forms/FormikIcon.tsx new file mode 100644 index 00000000..3c2b7711 --- /dev/null +++ b/src/core/workbench/app/src/components/Forms/FormikIcon.tsx @@ -0,0 +1,38 @@ +import { FormControl, FormErrorMessage, FormLabel } from '@chakra-ui/form-control'; +import { StarIcon } from '@chakra-ui/icons'; +import { Box, Input, Stack, Text } from '@chakra-ui/react'; +import { useField } from 'formik'; +import { FC } from 'react'; + +type NewType = JSX.IntrinsicElements['input']; + +interface ICustomFieldProps extends NewType { + label?: string; + name: string; + value?: string; + icon?: string; + hint?: string; +} + +// TODO: Deprecated in favor of 'formik-chakra-ui' +export const FormikIcon: FC = ({ label, ...props }) => { + const [field, meta] = useField(props); + + return ( + + {label} + + + + + + {props.hint && ( + + {props.hint} + + )} + {meta.touched && meta.error ? {meta.error} : null} + + + ); +}; diff --git a/src/core/workbench/app/src/components/Forms/FormikInput.tsx b/src/core/workbench/app/src/components/Forms/FormikInput.tsx new file mode 100644 index 00000000..c628ddd0 --- /dev/null +++ b/src/core/workbench/app/src/components/Forms/FormikInput.tsx @@ -0,0 +1,42 @@ +import { FormControl, FormErrorMessage, FormLabel } from '@chakra-ui/form-control'; +import { Box, Input, Text } from '@chakra-ui/react'; +import { useField } from 'formik'; +import React, { FC } from 'react'; + +type NewType = JSX.IntrinsicElements['input']; + +interface ICustomFieldProps extends NewType { + label?: string; + name: string; + type?: string; + value?: string; + readonly?: boolean; + hint?: string; + placeholder?: string; +} + +// TODO: Deprecated in favor of 'formik-chakra-ui' +export const FormikInput: FC = ({ label, ...props }) => { + const [field, meta] = useField(props); + + return ( + + {label} + + + {props.hint && ( + + {props.hint} + + )} + {meta.touched && meta.error ? 
{meta.error} : null} + + + ); +}; diff --git a/src/core/workbench/app/src/components/Forms/FormikLink.tsx b/src/core/workbench/app/src/components/Forms/FormikLink.tsx new file mode 100644 index 00000000..e79fa9f8 --- /dev/null +++ b/src/core/workbench/app/src/components/Forms/FormikLink.tsx @@ -0,0 +1,37 @@ +import { FormControl, FormLabel } from '@chakra-ui/form-control'; +import { ExternalLinkIcon } from '@chakra-ui/icons'; +import { Button, Stack, Link } from '@chakra-ui/react'; +import { useField } from 'formik'; +import { FC } from 'react'; + +type NewType = JSX.IntrinsicElements['input']; + +interface ICustomFieldProps extends NewType { + label: string; + name: string; + link: string; +} + +// TODO: Deprecated in favor of 'formik-chakra-ui' +export const FormikLink: FC = ({ label, ...props }) => { + const [, meta] = useField(props); + + return ( + + {label} + + + {props.link} + + + + + + ); +}; diff --git a/src/core/workbench/app/src/components/Forms/FormikSelect.tsx b/src/core/workbench/app/src/components/Forms/FormikSelect.tsx new file mode 100644 index 00000000..44bc2336 --- /dev/null +++ b/src/core/workbench/app/src/components/Forms/FormikSelect.tsx @@ -0,0 +1,26 @@ +import { FormControl, FormLabel, FormErrorMessage } from '@chakra-ui/form-control'; +import { Select } from '@chakra-ui/select'; +import { useField } from 'formik'; +import React, { FC } from 'react'; + +type NewType = JSX.IntrinsicElements['input']; + +interface ICustomFieldProps extends NewType { + label?: string; + name: string; +} + +// TODO: Deprecated in favor of 'formik-chakra-ui' +export const FormikSelect: FC = ({ label, ...props }) => { + const [field, meta] = useField(props); + + return ( + + {label} + + {meta.touched && meta.error ? {meta.error} : null} + + ); +}; diff --git a/src/core/workbench/app/src/components/Header/EnvironmentDropdown.tsx b/src/core/workbench/app/src/components/Header/EnvironmentDropdown.tsx new file mode 100644 index 00000000..c044044a --- /dev/null +++ b/src/core/workbench/app/src/components/Header/EnvironmentDropdown.tsx @@ -0,0 +1,140 @@ +/* eslint-disable jsx-a11y/no-autofocus */ +import { ChevronDownIcon } from '@chakra-ui/icons'; +import { + Menu, + MenuItem, + MenuList, + MenuGroup, + MenuButton, + Button, + useColorModeValue as mode, + Badge, + Tooltip, + Box, + Input, + InputGroup, + InputLeftElement, +} from '@chakra-ui/react'; +import { sum } from 'lodash'; +import { useCallback, useContext, useMemo, useState } from 'react'; +import { BsSearch } from 'react-icons/bs'; + +import { UserContext, GetEnvironmentNameAndSlug } from '../../context/UserContext'; +import { Project } from '../../context/UserContext/types'; +import { getSiteContext } from '../../utils/siteContext'; + +export const EnvironmentDropdown = () => { + const { currentUser } = useContext(UserContext); + const siteContext = getSiteContext(); + const { currentEnvName, currentEnvSlug } = GetEnvironmentNameAndSlug(currentUser); + const [search, setSearch] = useState(''); + + const totalEnvs = useMemo( + () => sum(currentUser?.projects?.map(({ environments }) => environments.length)), + [currentUser?.projects] + ); + + const isInSearch = useCallback( + (text: string) => text.toLocaleLowerCase().includes(search.toLocaleLowerCase()), + [search] + ); + + const getFilteredEnvs = useCallback( + (project: Project) => + project.environments.filter((env) => isInSearch(env.name) || isInSearch(env.slug)), + [isInSearch] + ); + + const filteredProjects = useMemo( + () => + currentUser?.projects + ?.filter( + (project) => + 
getFilteredEnvs(project).length > 0 || + isInSearch(project.repository.git_url) || + isInSearch(project.name) + ) + ?.sort((a, b) => a.name.localeCompare(b.name)), + [currentUser?.projects, getFilteredEnvs, isInSearch] + ); + + return ( + + } + > + {currentEnvName} + {currentEnvSlug} + + + {totalEnvs >= 4 && ( + + + + + + setSearch(e.target.value)} + placeholder="Search" + autoFocus + /> + + + )} + {filteredProjects?.map((project) => ( +
+ environment.slug === siteContext.env) + ? 'semibold' + : 'normal' + } + title={project.name} + > + {(getFilteredEnvs(project).length ? getFilteredEnvs(project) : project.environments) + ?.sort((a, b) => a.name.localeCompare(b.name)) + .map((env) => ( + + (window.location.href = `https://${env.slug}.${siteContext.launchpadHost}`) + } + > + + + {env.name} + + {env.slug.toUpperCase()} + + + + + ))} + +
+ ))} +
+
+ ); +}; diff --git a/src/core/workbench/app/src/components/Header/FreeTrial.tsx b/src/core/workbench/app/src/components/Header/FreeTrial.tsx new file mode 100644 index 00000000..a2547912 --- /dev/null +++ b/src/core/workbench/app/src/components/Header/FreeTrial.tsx @@ -0,0 +1,62 @@ +import { InfoIcon } from '@chakra-ui/icons'; +import { Button, Tooltip } from '@chakra-ui/react'; +import React, { useContext, useEffect } from 'react'; +import { useNavigate } from 'react-router-dom'; + +import { AccountContext } from '../../context/AccountContext'; +import { Account } from '../../context/AccountContext/types'; +import { UserContext } from '../../context/UserContext'; + +export const FreeTrialButton = ({ isWorkbench = false }: { isWorkbench?: boolean }) => { + const { currentUser } = useContext(UserContext); + const { accounts, currentAccount, setCurrentAccount } = useContext(AccountContext); + const navigate = useNavigate(); + + useEffect(() => { + if (isWorkbench) { + const account = accounts?.find( + (account: Account) => account.slug === currentUser?.env_account + ); + if (account && account.slug !== currentAccount?.slug) { + setCurrentAccount(account); + } + } + }, [accounts, currentAccount, currentUser?.env_account, isWorkbench, setCurrentAccount]); + + return ( + <> + {currentUser && + currentUser?.features.accounts_signup && + currentAccount && + currentAccount.remaining_trial_days >= 0 && ( + 0 + ? `${currentAccount?.remaining_trial_days} days.` + : 'less than a day.') + + ' Click here to subscribe to a paid plan.' + } + hasArrow + bg="white" + color="black" + > + + + )} + + ); +}; diff --git a/src/core/workbench/app/src/components/Header/GrafanaButton.tsx b/src/core/workbench/app/src/components/Header/GrafanaButton.tsx new file mode 100644 index 00000000..10f91a15 --- /dev/null +++ b/src/core/workbench/app/src/components/Header/GrafanaButton.tsx @@ -0,0 +1,25 @@ +import { ViewIcon } from '@chakra-ui/icons'; +import { Link, Tooltip } from '@chakra-ui/react'; +import React, { useContext } from 'react'; + +import { UserContext } from '../../context/UserContext'; +import { getSiteContext } from '../../utils/siteContext'; + +export const GrafanaButton = () => { + const siteContext = getSiteContext(); + const { currentUser } = useContext(UserContext); + + return ( + <> + {currentUser && + currentUser?.features.observability_stack && + currentUser.permissions.find((permission) => permission.includes('services:grafana')) && ( + + + + + + )} + + ); +}; diff --git a/src/core/workbench/app/src/components/Header/Header.tsx b/src/core/workbench/app/src/components/Header/Header.tsx new file mode 100644 index 00000000..f3c1727b --- /dev/null +++ b/src/core/workbench/app/src/components/Header/Header.tsx @@ -0,0 +1,108 @@ +import { Flex, HStack, Box, Tooltip, Select, Text } from '@chakra-ui/react'; +import { useContext } from 'react'; + +import { AccountContext } from '../../context/AccountContext'; +import { Account } from '../../context/AccountContext/types'; +import { TabsContext } from '../../context/TabsContext'; +import { UserContext } from '../../context/UserContext'; +import { WorkbenchStatus } from '../../features/workbench/workbench'; +import { getSiteContext } from '../../utils/siteContext'; +import { Logo } from '../Icons/Logo'; + +import { EnvironmentDropdown } from './EnvironmentDropdown'; +import { FreeTrialButton } from './FreeTrial'; +import { GrafanaButton } from './GrafanaButton'; +import { MissingDeveloperLicense } from './MissingDeveloperLicense'; +import { NavMenu } from 
'./NavMenu'; +import { ProfileDropdown } from './ProfileDropdown'; + +export function Header(props: { isWorkbench?: boolean; wstatus?: WorkbenchStatus }) { + const { isWorkbench, wstatus } = props; + const { currentTab } = useContext(TabsContext); + const { accounts, currentAccount, setCurrentAccount } = useContext(AccountContext); + const { currentUser } = useContext(UserContext); + + const bg = `${currentTab}.header`; + const dark = ['docs', 'load', 'transform', 'observe', 'orchestrate'].includes(currentTab); + const siteContext = getSiteContext(); + const logoUrl = currentUser?.env_account + ? `https://${siteContext.launchpadHost}?account=${currentAccount?.slug}` + : `https://${siteContext.launchpadHost}`; + + const onSelectAccount = (event: any) => { + const account = accounts?.find((account: Account) => account.slug === event.target.value); + setCurrentAccount(account); + }; + + return ( + + + + {/* Desktop Logo placement */} + + + + + + + + + {!isWorkbench && accounts && ( + + {accounts.length > 1 && ( + + )} + {accounts.length === 1 && ( + + {currentAccount?.name} + + )} + + )} + + {isWorkbench && currentUser?.projects && } + + + {/* Desktop Navigation Menu */} + {isWorkbench && } + + {/* Mobile Logo placement */} + + + + + + {!isWorkbench && } + + + + + ); +} diff --git a/src/core/workbench/app/src/components/Header/HeaderOffline.tsx b/src/core/workbench/app/src/components/Header/HeaderOffline.tsx new file mode 100644 index 00000000..cb434ff3 --- /dev/null +++ b/src/core/workbench/app/src/components/Header/HeaderOffline.tsx @@ -0,0 +1,37 @@ +import { Flex, Tooltip, Box } from '@chakra-ui/react'; + +import { getSiteContext } from '../../utils/siteContext'; + +import { Logo } from './Logo'; + +export function HeaderOffline() { + const siteContext = getSiteContext(); + const logoUrl = `https://${siteContext.launchpadHost}`; + return ( + + + + {/* Desktop Logo placement */} + + + + + + + + + + + ); +} diff --git a/src/core/workbench/app/src/components/Header/Logo.tsx b/src/core/workbench/app/src/components/Header/Logo.tsx new file mode 100644 index 00000000..74270087 --- /dev/null +++ b/src/core/workbench/app/src/components/Header/Logo.tsx @@ -0,0 +1,14 @@ +import { chakra, HTMLChakraProps } from '@chakra-ui/react'; + +export const Logo = (props: HTMLChakraProps<'svg'>) => ( + + + + +); diff --git a/src/core/workbench/app/src/components/Header/MissingDeveloperLicense.tsx b/src/core/workbench/app/src/components/Header/MissingDeveloperLicense.tsx new file mode 100644 index 00000000..c91f344c --- /dev/null +++ b/src/core/workbench/app/src/components/Header/MissingDeveloperLicense.tsx @@ -0,0 +1,25 @@ +import { WarningIcon } from '@chakra-ui/icons'; +import { Tag, Tooltip } from '@chakra-ui/react'; +import React, { useContext } from 'react'; + +import { UserContext } from '../../context/UserContext'; + +export const MissingDeveloperLicense = () => { + const { currentUser } = useContext(UserContext); + return ( + <> + {currentUser && !currentUser?.has_license && ( + + + Missing Developer License + + + )} + + ); +}; diff --git a/src/core/workbench/app/src/components/Header/MobileHamburgerMenu.tsx b/src/core/workbench/app/src/components/Header/MobileHamburgerMenu.tsx new file mode 100644 index 00000000..14f72a98 --- /dev/null +++ b/src/core/workbench/app/src/components/Header/MobileHamburgerMenu.tsx @@ -0,0 +1,19 @@ +import { Box } from '@chakra-ui/react'; +import { HiOutlineMenu, HiX } from 'react-icons/hi'; + +interface MobileHamburgerMenuProps { + onClick?: VoidFunction; + isOpen: boolean; 
+} + +export const MobileHamburgerMenu = (props: MobileHamburgerMenuProps) => { + const { onClick, isOpen } = props; + return ( + + + + {isOpen ? 'Close menu' : 'Open menu'} + + + ); +}; diff --git a/src/core/workbench/app/src/components/Header/NavItem.tsx b/src/core/workbench/app/src/components/Header/NavItem.tsx new file mode 100644 index 00000000..b8a795fa --- /dev/null +++ b/src/core/workbench/app/src/components/Header/NavItem.tsx @@ -0,0 +1,175 @@ +import { ChevronDownIcon } from '@chakra-ui/icons'; +import { + Box, + HStack, + ListItem, + Menu, + Tooltip, + UnorderedList, + useMenuButton, + UseMenuButtonProps, +} from '@chakra-ui/react'; +import { css } from '@emotion/react'; +import * as React from 'react'; +import { useState } from 'react'; + +import { openLink } from '../../utils/link'; +interface NavItemProps { + href?: string; + active?: boolean; + label: string; + onClick: (ev: any) => void; + dark?: boolean; + menuList?: React.ReactNode; + isEnabled?: boolean; + unmetConditions?: Array; +} + +interface TabMenuProps extends UseMenuButtonProps { + icon?: React.ReactNode; + label: string; +} + +const TabMenuButton = (props: TabMenuProps) => { + const { icon, label } = props; + const buttonProps = useMenuButton(props); + return ( + + {icon && ( + + {icon} + + )} + {label} + + + + + ); +}; + +interface DesktopNavItemProps extends NavItemProps { + icon?: React.ReactNode; +} + +const DesktopNavItem = (props: DesktopNavItemProps) => { + const { + icon, + label, + href = null, + active, + onClick, + dark, + menuList, + isEnabled = true, + unmetConditions = [], + } = props; + const menu = menuList !== undefined; + + const handleClick = (event: any) => { + const isCommandOrCtrlPressed = event.metaKey || event.ctrlKey; + + if (isCommandOrCtrlPressed) { + // Prevent the default behavior (e.g., opening the link) + event.preventDefault(); + + if (href) { + openLink(href); + } + } else { + onClick(event); + } + }; + const [hoveredMenu, setHoveredMenu] = useState(null); + const [isHovered, setIsHovered] = useState(false); + const handleMouseEnter = (label: string) => { + setHoveredMenu(label); + setIsHovered(true); + }; + const handleMouseLeave = () => { + setHoveredMenu(null); + setIsHovered(false); + }; + const customFocusStyle = css` + &:focus-visible { + outline: none; /* Overwrite the default outline */ + } + `; + const content = ( + handleMouseEnter(label)} + onMouseLeave={handleMouseLeave} + border={isHovered ? 'none' : undefined} + css={customFocusStyle} + > + {menu && ( + + + {menuList} + + )} + {!menu && icon && ( + + {icon} + + )} + {!menu && {label}} + + ); + + const tooltipContent = ( + <> + + {unmetConditions.map((condition, index) => ( + {condition} + ))} + + + ); + + return !isEnabled ? 
{content} : <>{content}; +}; + +const MobileNavItem = (props: NavItemProps) => { + const { label, active, onClick, dark } = props; + return ( + + {label} + + ); +}; + +export const NavItem = { + Desktop: DesktopNavItem, + Mobile: MobileNavItem, +}; diff --git a/src/core/workbench/app/src/components/Header/NavMenu.tsx b/src/core/workbench/app/src/components/Header/NavMenu.tsx new file mode 100644 index 00000000..ddd02b66 --- /dev/null +++ b/src/core/workbench/app/src/components/Header/NavMenu.tsx @@ -0,0 +1,496 @@ +import { ExternalLinkIcon } from '@chakra-ui/icons'; +import { + Box, + Divider, + Flex, + HStack, + MenuGroup, + MenuItem, + MenuList, + VStack, + Text, + List, + ListItem, + ListIcon, +} from '@chakra-ui/react'; +import { Fragment, useContext, useState } from 'react'; +import { + HiViewGridAdd, + HiAdjustments, + HiEye, + HiBookOpen, + HiInboxIn, + HiChartBar, + HiOutlinePlay, + HiOutlineUser, + HiOutlineUserGroup, + HiOutlineRefresh, + HiCode, +} from 'react-icons/hi'; +import { MdCheckCircle } from 'react-icons/md'; + +import { TabsContext } from '../../context/TabsContext'; +import { + UserContext, + HasTabAccess, + tabPermissions, + confirmUserPermissions, + HasWorkbenchAccess, + HasWorkbenchServiceAccess, +} from '../../context/UserContext'; +import { restartCodeServer } from '../../features/global/api/restartCodeServer'; +import { startLocalAirflow } from '../../features/global/api/startLocalAirflow'; +import { WebSocketContext } from '../../features/global/websocket/WebSocketContext'; +import { WorkbenchStatus } from '../../features/workbench/workbench'; +import { + analyzeLink, + docsLink, + loadLink, + localAirflowLink, + openLink, + orchestrateLink, + transformLink, +} from '../../utils/link'; +import { getSiteContext } from '../../utils/siteContext'; +import { AlertDialog } from '../AlertDialog'; + +import { NavItem } from './NavItem'; + +const MobileNavMenu = (props: { isOpen?: boolean; dark?: boolean }) => { + const { isOpen } = props; + const { currentTab, setCurrentTab } = useContext(TabsContext); + const { currentUser } = useContext(UserContext); + const tabs = ['docs', 'load', 'transform', 'observe', 'orchestrate', 'analyze']; + + return ( + + ); +}; + +const DesktopNavMenu = (props: { dark?: boolean; wstatus?: WorkbenchStatus }) => { + const { isWebSocketReady, sendMessageBySocket } = useContext(WebSocketContext); + const { currentTab, setCurrentTab } = useContext(TabsContext); + const { currentUser } = useContext(UserContext); + const siteContext = getSiteContext(); + const [isConfirmOpen, setIsConfirmOpen] = useState(false); + const onClose = () => setIsConfirmOpen(false); + const [isLocalAirflowConfirmOpen, setIsLocalAirflowConfirmOpen] = useState(false); + const onCloseLocalAirflow = () => setIsLocalAirflowConfirmOpen(false); + const [showRestartingText, setShowRestartingText] = useState(false); + const [isLocalAirflowContinueOpen, setIsLocalAirflowContinueOpen] = useState( + (() => { + return localStorage.getItem('openLocalAirflow') === 'true'; + })() + ); + + const envShareLinks = currentUser?.user_environments.find( + (x) => x.env_slug === siteContext.env + )?.share_links; + const shareLinks: string[] = envShareLinks ? 
Object.keys(envShareLinks) : []; + + const envSlug = getSiteContext().env; + const project = currentUser?.projects.find((project) => + project.environments.find((environment) => environment.slug === envSlug) + ); + const env = project && project.environments.find((environment) => environment.slug === envSlug); + + const handleResetCodeServer = () => { + if (siteContext.env) { + setShowRestartingText(true); + if (isWebSocketReady) { + sendMessageBySocket({ + message_type: 'env.restart.code-server', + env_slug: siteContext.env, + }); + setTimeout(() => window.location.reload(), 5000); + } else { + restartCodeServer(siteContext.env).then(() => { + setTimeout(() => window.location.reload(), 5000); + }); + } + } + }; + + const handleStartLocalAirflow = () => { + if (siteContext.env) { + setShowRestartingText(true); + + if (isWebSocketReady) { + sendMessageBySocket({ + message_type: 'env.start.local-airflow', + env_slug: siteContext.env, + }); + setTimeout(() => { + localStorage.setItem('openLocalAirflow', 'true'); + localStorage.setItem('currentTab', JSON.stringify('transform')); + window.location.reload(); + }, 5000); + } else { + startLocalAirflow(siteContext.env).then(() => { + setTimeout(() => { + localStorage.setItem('openLocalAirflow', 'true'); + localStorage.setItem('currentTab', JSON.stringify('transform')); + window.location.reload(); + }, 5000); + }); + } + } + }; + + const handleContinueLocalAirflow = () => { + if (currentUser) { + openLink(localAirflowLink(currentUser.slug)); + onCloseContinueLocalAirflow(); + } + }; + + const onCloseContinueLocalAirflow = () => { + setIsLocalAirflowContinueOpen(false); + + // Clean the local storage + localStorage.removeItem('openLocalAirflow'); + setCurrentTab('transform'); + }; + + const shouldShowTab = (tab: string) => { + if (currentUser) { + // If local airflow is enabled we also check for code-server on the orchestrate tab + const tabPerms = tabPermissions[tab].concat( + tab === 'orchestrate' && currentUser?.features.local_airflow ? ['code-server'] : [] + ); + + const checkServicePermission = (code: string) => + env?.services[code]?.enabled && + env?.services[code]?.valid && + confirmUserPermissions(currentUser, [code], envSlug, project?.slug); + + const getRequiredCheckMethod = () => { + if (tab === 'orchestrate') { + return env?.type === 'prod' ? 'some' : 'every'; + } + return 'some'; + }; + + return tabPerms.length > 0 + ? 
tabPerms[getRequiredCheckMethod()](checkServicePermission) + : HasWorkbenchAccess(currentUser); + } + return false; + }; + + const getUnmetConditions = (tab: string) => { + const unmetConditions: Array = []; + if (tab !== 'docs') { + if (currentUser) { + const servicesInvalid = tabPermissions[tab].filter( + (code) => env?.services[code].valid === false + ); + if (servicesInvalid.length === tabPermissions[tab].length) { + servicesInvalid.map((service) => + env?.services[service].unmet_preconditions?.map((unmet) => + unmetConditions.push(unmet.message) + ) + ); + } + } + } + return unmetConditions; + }; + + const getUnmetUserServiceConditions = (service: string) => { + if (currentUser) { + const ue = currentUser.user_environments.find( + (environment) => environment.env_slug == envSlug + ); + + if (ue?.services[service]) { + return ue.services[service].unmet_preconditions; + } + } + + return []; + }; + + return ( + + {currentUser && shouldShowTab('docs') && ( + setCurrentTab('docs')} + active={currentTab === 'docs'} + icon={} + label="Docs" + dark={props.dark} + href={docsLink(true)} + isEnabled={!getUnmetConditions('docs').length} + unmetConditions={getUnmetConditions('docs')} + /> + )} + {currentUser && shouldShowTab('load') && ( + setCurrentTab('load')} + active={currentTab === 'load'} + icon={} + label="Load" + dark={props.dark} + href={loadLink()} + isEnabled={!getUnmetConditions('load').length} + unmetConditions={getUnmetConditions('load')} + /> + )} + {currentUser && shouldShowTab('transform') && currentUser.has_license && ( + + HasTabAccess(currentUser, 'transform') && + currentUser.has_license && + setCurrentTab('transform') + } + active={currentTab === 'transform'} + icon={} + label="Transform" + dark={props.dark} + isEnabled={!getUnmetConditions('transform').length} + unmetConditions={getUnmetConditions('transform')} + href={transformLink(currentUser.slug)} + menuList={ + (!getUnmetConditions('transform').length && currentUser?.features.codeserver_restart) || + shareLinks.length > 0 ? ( + + setCurrentTab('transform')} icon={}> + Open VS Code + + setIsConfirmOpen(true)} + icon={} + isDisabled={ + !props.wstatus?.services || + props.wstatus?.services?.['code-server'] !== 'running' + } + > + Reset my environment + + {shareLinks.length > 0 && ( + <> + + + {shareLinks.map((service: string) => ( + } + onClick={() => envShareLinks && openLink(envShareLinks[service])} + key={service} + > + {service} + + ))} + + + )} + + ) : undefined + } + /> + )} + {currentUser && shouldShowTab('observe') && ( + setCurrentTab('observe')} + active={currentTab === 'observe'} + icon={} + label="Observe" + dark={props.dark} + isEnabled={!getUnmetConditions('observe').length} + unmetConditions={getUnmetConditions('observe')} + /> + )} + {currentUser && + shouldShowTab('orchestrate') && + (currentUser?.features.local_airflow && + confirmUserPermissions( + currentUser, + ['airflow:admin', 'airflow:sysadmin'], + envSlug, + project?.slug + ) ? ( + setCurrentTab('orchestrate')} + active={currentTab === 'orchestrate'} + icon={} + label="Orchestrate" + dark={props.dark} + href={orchestrateLink()} + isEnabled={!getUnmetConditions('orchestrate').length} + unmetConditions={getUnmetConditions('orchestrate')} + menuList={ + + {HasWorkbenchServiceAccess(currentUser, ['airflow']) && ( + setCurrentTab('orchestrate')} + icon={} + > + Open Team Airflow + + )} + {env?.type != 'prod' ? ( + getUnmetUserServiceConditions('local-airflow').length ? 
( + <> + { + event.stopPropagation(); + setIsLocalAirflowConfirmOpen(true); + }} + isDisabled={props.wstatus?.services?.['code-server'] !== 'running'} + icon={} + > + Start My Airflow + + + ) : ( + <> + openLink(localAirflowLink(currentUser.slug))} + isDisabled={props.wstatus?.services?.['code-server'] !== 'running'} + icon={} + > + Open My Airflow + + + ) + ) : ( + <> + )} + + } + /> + ) : ( + setCurrentTab('orchestrate')} + active={currentTab === 'orchestrate'} + icon={} + label="Orchestrate" + dark={props.dark} + href={orchestrateLink()} + isEnabled={!getUnmetConditions('orchestrate').length} + unmetConditions={getUnmetConditions('orchestrate')} + /> + ))} + {currentUser && shouldShowTab('analyze') && ( + setCurrentTab('analyze')} + active={currentTab === 'analyze'} + icon={} + label="Analyze" + dark={props.dark} + href={analyzeLink()} + isEnabled={!getUnmetConditions('analyze').length} + unmetConditions={getUnmetConditions('analyze')} + /> + )} + + This action will restore the following items to defaults: + + + + Python libraries + + + + Code extensions + + + + Code settings + + + + SSH and database keys + + + + dbt profiles + + + {showRestartingText && Restarting your environment...} + + } + confirmLabel="OK, go ahead" + onClose={onClose} + onConfirm={handleResetCodeServer} + isLoadingOnSubmit={true} + /> + + + Starting{' '}My Airflow will require a reload of VS Code. + You will not lose any settings or data. + + {showRestartingText && Reloading...} + + } + confirmLabel="OK, go ahead" + onClose={onCloseLocalAirflow} + onConfirm={handleStartLocalAirflow} + isLoadingOnSubmit={true} + confirmColor="green" + /> + + Any changes to your DAGS in VS Code will appear in your{' '}My Airflow instance + + Click the button below to open {' '}My Airflow in a new tab. + + + } + confirmLabel="Open My Airflow" + onClose={onCloseContinueLocalAirflow} + onConfirm={handleContinueLocalAirflow} + isLoadingOnSubmit={true} + confirmColor="green" + /> + + ); +}; + +export const NavMenu = { + Mobile: MobileNavMenu, + Desktop: DesktopNavMenu, +}; diff --git a/src/core/workbench/app/src/components/Header/Notification.tsx b/src/core/workbench/app/src/components/Header/Notification.tsx new file mode 100644 index 00000000..f90973ef --- /dev/null +++ b/src/core/workbench/app/src/components/Header/Notification.tsx @@ -0,0 +1,37 @@ +import { Box, Center, CenterProps } from '@chakra-ui/react'; +import * as React from 'react'; +import { FaBell } from 'react-icons/fa'; + +const NotificationBadge = (props: CenterProps) => ( +
+); + +export const Notification = (props: CenterProps) => ( +
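The shouldShowTab helper in NavMenu.tsx above decides whether a workbench tab is rendered: it looks up the services a tab requires, checks that each is enabled and valid in the current environment and permitted for the user, and switches between an all-of check and an any-of check for the orchestrate tab depending on environment type. A condensed, self-contained sketch of that rule follows; the Env shape and the requiredServicesByTab map here are simplified stand-ins (the real map is the tabPermissions import from UserContext), not the app's actual types.

// Sketch only: simplified stand-ins for the app's real types and helpers.
type ServiceState = { enabled: boolean; valid: boolean };
type Env = { type: 'dev' | 'prod'; services: Record<string, ServiceState> };

// Stand-in for the tabPermissions map imported from UserContext.
const requiredServicesByTab: Record<string, string[]> = {
  transform: ['code-server'],
  orchestrate: ['airflow'],
};

// Assumed simplification of confirmUserPermissions, which also takes env/project slugs.
const userHasPermission = (userPerms: string[], code: string) => userPerms.includes(code);

export function shouldShowTabSketch(tab: string, env: Env, userPerms: string[]): boolean {
  const required = requiredServicesByTab[tab] ?? [];
  const serviceOk = (code: string) =>
    Boolean(env.services[code]?.enabled && env.services[code]?.valid) &&
    userHasPermission(userPerms, code);

  // Orchestrate outside prod needs every required service; everything else needs at least one.
  const method = tab === 'orchestrate' && env.type !== 'prod' ? 'every' : 'some';
  return method === 'every' ? required.every(serviceOk) : required.some(serviceOk);
}

The real implementation additionally appends 'code-server' to the orchestrate requirements when the local_airflow feature flag is on, and falls back to HasWorkbenchAccess when a tab has no required services.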
+ Click to see 9 notifications + 9 + +
+); diff --git a/src/core/workbench/app/src/components/Header/ProfileDropdown.tsx b/src/core/workbench/app/src/components/Header/ProfileDropdown.tsx new file mode 100644 index 00000000..675f001b --- /dev/null +++ b/src/core/workbench/app/src/components/Header/ProfileDropdown.tsx @@ -0,0 +1,81 @@ +import { + Avatar, + Box, + Flex, + HStack, + Menu, + MenuItem, + MenuList, + Text, + useMenuButton, + UseMenuButtonProps, + useColorModeValue as mode, + Divider, +} from '@chakra-ui/react'; +import React, { useContext } from 'react'; +import { useNavigate } from 'react-router-dom'; + +import { API_URL } from '../../config'; +import { UserContext } from '../../context/UserContext'; + +const UserAvatar = () => { + const { currentUser } = useContext(UserContext); + return ; +}; + +const ProfileMenuButton = (props: UseMenuButtonProps) => { + const buttonProps = useMenuButton(props); + return ( + + Open user menu + + + ); +}; + +export const ProfileDropdown = () => { + const navigate = useNavigate(); + const { currentUser } = useContext(UserContext); + + return ( + + + + + + + + {currentUser?.name || 'User Name'} + + {currentUser?.email} + + + + + {(currentUser?.features.user_profile_change_credentials || + currentUser?.features.user_profile_change_name || + currentUser?.features.user_profile_delete_account) && ( + navigate('/settings')} fontWeight="medium"> + Settings + + )} + + (window.location = `${API_URL}/iam/logout` as any)} + > + Sign out + + + + + ); +}; diff --git a/src/core/workbench/app/src/components/Header/index.ts b/src/core/workbench/app/src/components/Header/index.ts new file mode 100644 index 00000000..266dec8a --- /dev/null +++ b/src/core/workbench/app/src/components/Header/index.ts @@ -0,0 +1 @@ +export * from './Header'; diff --git a/src/core/workbench/app/src/components/Icons/Airbyte.tsx b/src/core/workbench/app/src/components/Icons/Airbyte.tsx new file mode 100644 index 00000000..83791b01 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Airbyte.tsx @@ -0,0 +1,10 @@ +import { Icon } from '@chakra-ui/react'; + +export const AirbyteIcon = (props: any) => ( + + + +); diff --git a/src/core/workbench/app/src/components/Icons/AzureDevops.tsx b/src/core/workbench/app/src/components/Icons/AzureDevops.tsx new file mode 100644 index 00000000..7b9c6c67 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/AzureDevops.tsx @@ -0,0 +1,11 @@ +import { Icon } from '@chakra-ui/react'; + +export const AzureDevopsIcon = (props: any) => ( + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Bigquery.tsx b/src/core/workbench/app/src/components/Icons/Bigquery.tsx new file mode 100644 index 00000000..de109aa2 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Bigquery.tsx @@ -0,0 +1,18 @@ +import { Icon } from '@chakra-ui/react'; + +export const BigqueryIcon = (props: any) => ( + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Bitbucket.tsx b/src/core/workbench/app/src/components/Icons/Bitbucket.tsx new file mode 100644 index 00000000..f589bb83 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Bitbucket.tsx @@ -0,0 +1,18 @@ +import { Icon } from '@chakra-ui/react'; +import React from 'react'; + +export const BitbucketIcon = (props: any) => ( + + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/BrowseData.tsx b/src/core/workbench/app/src/components/Icons/BrowseData.tsx new file mode 100644 index 00000000..0cf4941a --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/BrowseData.tsx @@ -0,0 
+1,10 @@ +import { Icon } from '@chakra-ui/react'; + +export const BrowseDataIcon = (props: any) => ( + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Dagster.tsx b/src/core/workbench/app/src/components/Icons/Dagster.tsx new file mode 100644 index 00000000..51c4490b --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Dagster.tsx @@ -0,0 +1,26 @@ +import { Icon } from '@chakra-ui/react'; + +export const DagsterIcon = (props: any) => ( + + + + + + + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/DataHub.tsx b/src/core/workbench/app/src/components/Icons/DataHub.tsx new file mode 100644 index 00000000..dc0c51d2 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/DataHub.tsx @@ -0,0 +1,18 @@ +import { Icon } from '@chakra-ui/react'; + +export const DataHubIcon = (props: any) => ( + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Databricks.tsx b/src/core/workbench/app/src/components/Icons/Databricks.tsx new file mode 100644 index 00000000..b5710a28 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Databricks.tsx @@ -0,0 +1,22 @@ +import { Icon } from '@chakra-ui/react'; + +export const DatabricksIcon = (props: any) => ( + + + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Datacoves.tsx b/src/core/workbench/app/src/components/Icons/Datacoves.tsx new file mode 100644 index 00000000..022628c2 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Datacoves.tsx @@ -0,0 +1,11 @@ +import { Icon } from '@chakra-ui/react'; +import * as React from 'react'; + +export const Datacoves = (props: any) => ( + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Dbt.tsx b/src/core/workbench/app/src/components/Icons/Dbt.tsx new file mode 100644 index 00000000..31d24fbe --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Dbt.tsx @@ -0,0 +1,10 @@ +import { Icon } from '@chakra-ui/react'; + +export const DbtIcon = (props: any) => ( + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Docs.tsx b/src/core/workbench/app/src/components/Icons/Docs.tsx new file mode 100644 index 00000000..c0cf08d3 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Docs.tsx @@ -0,0 +1,15 @@ +import { Icon } from '@chakra-ui/react'; + +export const DocsIcon = (props: any) => ( + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Github.tsx b/src/core/workbench/app/src/components/Icons/Github.tsx new file mode 100644 index 00000000..a46da723 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Github.tsx @@ -0,0 +1,11 @@ +import { Icon } from '@chakra-ui/react'; + +export const GithubIcon = (props: any) => ( + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Gitlab.tsx b/src/core/workbench/app/src/components/Icons/Gitlab.tsx new file mode 100644 index 00000000..2bf0e6fd --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Gitlab.tsx @@ -0,0 +1,13 @@ +import { Icon } from '@chakra-ui/react'; + +export const GitlabIcon = (props: any) => ( + + + + + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Home.tsx b/src/core/workbench/app/src/components/Icons/Home.tsx new file mode 100644 index 00000000..b4485883 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Home.tsx @@ -0,0 +1,14 @@ +import { Icon } from '@chakra-ui/react'; + +export const HomeIcon = (props: any) => ( + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Logo.tsx 
b/src/core/workbench/app/src/components/Icons/Logo.tsx new file mode 100644 index 00000000..94d08022 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Logo.tsx @@ -0,0 +1,15 @@ +import { Icon } from '@chakra-ui/react'; +import * as React from 'react'; + +export const Logo = (props: any) => ( + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/NewQuestion.tsx b/src/core/workbench/app/src/components/Icons/NewQuestion.tsx new file mode 100644 index 00000000..c3d046fc --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/NewQuestion.tsx @@ -0,0 +1,10 @@ +import { Icon } from '@chakra-ui/react'; + +export const NewQuestionIcon = (props: any) => ( + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Redshift.tsx b/src/core/workbench/app/src/components/Icons/Redshift.tsx new file mode 100644 index 00000000..ff82d062 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Redshift.tsx @@ -0,0 +1,16 @@ +import { Icon } from '@chakra-ui/react'; + +export const RedshiftIcon = (props: any) => ( + + + + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Snowflake.tsx b/src/core/workbench/app/src/components/Icons/Snowflake.tsx new file mode 100644 index 00000000..3cf45591 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Snowflake.tsx @@ -0,0 +1,34 @@ +import { Icon } from '@chakra-ui/react'; + +export const SnowflakeIcon = (props: any) => ( + + + + + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Sql.tsx b/src/core/workbench/app/src/components/Icons/Sql.tsx new file mode 100644 index 00000000..dd019b23 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Sql.tsx @@ -0,0 +1,48 @@ +import { Icon } from '@chakra-ui/react'; + +export const SqlIcon = (props: any) => ( + + + + + + + + + + + + + + + + + + + + + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Superset.tsx b/src/core/workbench/app/src/components/Icons/Superset.tsx new file mode 100644 index 00000000..cfbde5a2 --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Superset.tsx @@ -0,0 +1,18 @@ +import { Icon } from '@chakra-ui/react'; + +export const SupersetIcon = (props: any) => ( + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Table.tsx b/src/core/workbench/app/src/components/Icons/Table.tsx new file mode 100644 index 00000000..46f83bab --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Table.tsx @@ -0,0 +1,20 @@ +import { Icon } from '@chakra-ui/react'; + +export const TableIcon = (props: any) => ( + + + + + + + + + + + + + +); diff --git a/src/core/workbench/app/src/components/Icons/Testing.tsx b/src/core/workbench/app/src/components/Icons/Testing.tsx new file mode 100644 index 00000000..7c509ebd --- /dev/null +++ b/src/core/workbench/app/src/components/Icons/Testing.tsx @@ -0,0 +1,22 @@ +import { Icon } from '@chakra-ui/react'; + +export const TestingIcon = (props: any) => ( + + + + + + +); diff --git a/src/core/workbench/app/src/components/Learn/Learn.tsx b/src/core/workbench/app/src/components/Learn/Learn.tsx new file mode 100644 index 00000000..969f22ea --- /dev/null +++ b/src/core/workbench/app/src/components/Learn/Learn.tsx @@ -0,0 +1,80 @@ +import { + Box, + Text, + Accordion, + AccordionItem, + AccordionButton, + AccordionPanel, + AccordionIcon, +} from '@chakra-ui/react'; +import ChakraUIRenderer from 'chakra-ui-markdown-renderer'; +import ReactMarkdown from 'react-markdown'; + +export function Learn() { + const firstMd = ` + * 
[Quickstart](https://docs.airbyte.com/quickstart/getting-started/#set-up-your-preferences) + * [Sources catalog](https://docs.airbyte.com/category/sources) + * [Destinations catalog](https://docs.airbyte.com/category/destinations)`; + const secondMd = ` + * [Snowflake SQL reference](https://docs.snowflake.com/en/sql-reference-commands.html) + * [Querying Semi-Structured (JSON) data](https://docs.snowflake.com/en/user-guide/querying-semistructured.html)`; + const thirdMd = ` + * [Version control in VS Code](https://code.visualstudio.com/docs/introvideos/versioncontrol) + * [dbt commands reference](https://docs.getdbt.com/reference/dbt-commands)`; + return ( + + + Documentation + + + +
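Each of the Icons/*.tsx files above follows the same thin-wrapper pattern around Chakra's Icon component, spreading the received props so call sites can size and color the glyph. A minimal hypothetical example of the pattern; the name, viewBox, and path data below are placeholders, not the real artwork.

import { Icon } from '@chakra-ui/react';

// Hypothetical example icon; the real components embed each product's SVG path data.
export const ExampleIcon = (props: any) => (
  <Icon viewBox="0 0 24 24" {...props}>
    <path fill="currentColor" d="M12 2l10 20H2L12 2z" />
  </Icon>
);

// Usage: <ExampleIcon boxSize={5} color="gray.500" />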

+ + + Airbyte + + + +

+ + {firstMd} + +
+ + +

+ + + Snowflake reference + + + +

+ + {secondMd} + +
+ + +

+ + + VS Code and dbt + + + +

+ + {thirdMd} + +
+
+
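Learn.tsx above renders each of those markdown link lists inside a Chakra accordion section, passing ChakraUIRenderer to ReactMarkdown so the rendered markdown picks up Chakra styling. A rough sketch of one such section; the component name, layout props, and allowToggle setting are assumptions rather than values taken from the original file.

import {
  Accordion,
  AccordionItem,
  AccordionButton,
  AccordionPanel,
  AccordionIcon,
  Box,
} from '@chakra-ui/react';
import ChakraUIRenderer from 'chakra-ui-markdown-renderer';
import ReactMarkdown from 'react-markdown';

// Sketch: one collapsible section rendering a markdown bullet list of links.
export const LearnSection = ({ title, md }: { title: string; md: string }) => (
  <Accordion allowToggle>
    <AccordionItem>
      <AccordionButton>
        <Box flex="1" textAlign="left">
          {title}
        </Box>
        <AccordionIcon />
      </AccordionButton>
      <AccordionPanel>
        {/* react-markdown v6+ takes the element map via `components`; older versions used `renderers`. */}
        <ReactMarkdown components={ChakraUIRenderer()}>{md}</ReactMarkdown>
      </AccordionPanel>
    </AccordionItem>
  </Accordion>
);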
+ ); +} diff --git a/src/core/workbench/app/src/components/Learn/index.ts b/src/core/workbench/app/src/components/Learn/index.ts new file mode 100644 index 00000000..90a2cc09 --- /dev/null +++ b/src/core/workbench/app/src/components/Learn/index.ts @@ -0,0 +1 @@ +export * from './Learn'; diff --git a/src/core/workbench/app/src/components/LoadingWrapper.tsx b/src/core/workbench/app/src/components/LoadingWrapper.tsx new file mode 100644 index 00000000..b80f2f65 --- /dev/null +++ b/src/core/workbench/app/src/components/LoadingWrapper.tsx @@ -0,0 +1,21 @@ +import { Center } from '@chakra-ui/layout'; +import { Spinner } from '@chakra-ui/spinner'; +import { ReactNode } from 'react'; + +interface Props { + isLoading: boolean; + showElements?: boolean; + children: ReactNode; +} + +export const LoadingWrapper = ({ isLoading, children, showElements = true }: Props) => ( + <> + {isLoading || !showElements ? ( +
+ +
+ ) : ( + showElements && children + )} + +); diff --git a/src/core/workbench/app/src/components/MaxWidthTableCell.tsx b/src/core/workbench/app/src/components/MaxWidthTableCell.tsx new file mode 100644 index 00000000..605fdb0f --- /dev/null +++ b/src/core/workbench/app/src/components/MaxWidthTableCell.tsx @@ -0,0 +1,20 @@ +import { Box, Stack, Tooltip } from '@chakra-ui/react'; + +interface MaxWidthTableCellProps { + value: string; + maxW: string; +} +export const MaxWidthTableCell = (props: MaxWidthTableCellProps) => { + const { value, maxW } = props; + return ( + + + + + {value} + + + + + ); +}; diff --git a/src/core/workbench/app/src/components/ModalSpinner.tsx b/src/core/workbench/app/src/components/ModalSpinner.tsx new file mode 100644 index 00000000..372d5c68 --- /dev/null +++ b/src/core/workbench/app/src/components/ModalSpinner.tsx @@ -0,0 +1,90 @@ +import { + VStack, + Text, + Flex, + Card, + CardHeader, + CardBody, + Heading, + Center, + Box, + HStack, +} from '@chakra-ui/react'; +import { sample } from 'lodash'; +import React, { useEffect, useState } from 'react'; +import { FaLightbulb } from 'react-icons/fa'; + +import { DatacovesSpinner } from './DatacovesSpinner'; +import { replaceJSX, TIPS } from './utils'; + +interface ModalSpinnerProps { + message: string; + showSpinner?: boolean; + sidebar?: boolean; + details?: string[]; +} + +export const ModalSpinner = ({ message, showSpinner, sidebar, details }: ModalSpinnerProps) => { + const [tip, setTip] = useState(sample(TIPS)); + + useEffect(() => { + const interval = setInterval(() => { + setTip(sample(TIPS)); + }, 15000); + + return () => clearInterval(interval); + }, []); + + return ( + + + + {message} + + + {showSpinner && ( + +
+ +
+ + + + + + {tip?.service} + + {replaceJSX(tip?.tip || '')} + + + + {details && details.length > 0 && ( + + + + + + {details?.map((detail, index) => ( + + {detail} + + ))} + + + + + + )} +
+ )} +
+
+
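The ModalSpinner above cycles to a new random entry from TIPS every 15 seconds using lodash's sample inside a setInterval effect, and clears the interval on unmount. The same idea expressed as a small reusable hook; the hook name and the Tip shape are assumptions for this sketch, mirroring the tip.service / tip.tip fields the component reads.

import { useEffect, useState } from 'react';
import { sample } from 'lodash';

interface Tip {
  service: string;
  tip: string;
}

// Sketch: pick a random tip immediately, then rotate every `intervalMs`, cleaning up on unmount.
export function useRotatingTip(tips: Tip[], intervalMs = 15000): Tip | undefined {
  const [tip, setTip] = useState<Tip | undefined>(() => sample(tips));

  useEffect(() => {
    const interval = setInterval(() => setTip(sample(tips)), intervalMs);
    return () => clearInterval(interval);
  }, [tips, intervalMs]);

  return tip;
}

ModalSpinner could then read const tip = useRotatingTip(TIPS) in place of its inline interval state.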
+ ); +}; diff --git a/src/core/workbench/app/src/components/Notification.tsx b/src/core/workbench/app/src/components/Notification.tsx new file mode 100644 index 00000000..450234e7 --- /dev/null +++ b/src/core/workbench/app/src/components/Notification.tsx @@ -0,0 +1,57 @@ +import { + Box, + Button, + Center, + Flex, + FlexProps, + Icon, + Stack, + StackDivider, + useColorModeValue, +} from '@chakra-ui/react'; +import * as React from 'react'; +import { FiInfo } from 'react-icons/fi'; + +interface NotificationProps extends FlexProps { + primaryAction: React.ReactNode; + secondaryAction: React.ReactNode; +} + +export const Notification = (props: NotificationProps) => { + const { primaryAction, secondaryAction, children, ...flexProps } = props; + return ( + +
+ +
+ } spacing="0"> + + {children} + + } + spacing="0" + > + {primaryAction} + {secondaryAction} + + +
+ ); +}; + +export const NotificationButton = (props: any) => ( + + + + + + ); +} diff --git a/src/core/workbench/app/src/components/ProductBox/__tests__/ProductBox.test.tsx b/src/core/workbench/app/src/components/ProductBox/__tests__/ProductBox.test.tsx new file mode 100644 index 00000000..63b43418 --- /dev/null +++ b/src/core/workbench/app/src/components/ProductBox/__tests__/ProductBox.test.tsx @@ -0,0 +1,39 @@ +import { screen, rtlRender, waitFor, act } from '../../../test/test-utils'; +import { ProductBox } from '../ProductBox'; + +// const text = `Documentation`; +const url = ''; +const title = 'Bring your data together'; +const subtitle = 'Powered by AirByte'; +const text = 'Extract and load data from a variety of sources and keep the data synchronized.'; +const button = 'Load'; + +jest.mock('@chakra-ui/react', () => { + const originalModule = jest.requireActual('@chakra-ui/react'); + return { + __esModule: true, + ...originalModule, + useBreakpointValue: jest.fn().mockImplementation(() => false), + }; +}); + +describe('ProductBox Tests', () => { + test('should handle basic ProductBox flow', async () => { + const productBox = ( + console.log('Load')} + /> + ); + act(() => { + rtlRender(productBox); + }); + expect(screen.getByText(title)).toBeInTheDocument(); + await waitFor(() => expect(document.title).toEqual('')); + // await waitFor(() => expect(screen.queryByText(title)).not.toBeInTheDocument()); + }); +}); diff --git a/src/core/workbench/app/src/components/ProductBox/index.ts b/src/core/workbench/app/src/components/ProductBox/index.ts new file mode 100644 index 00000000..14c87b43 --- /dev/null +++ b/src/core/workbench/app/src/components/ProductBox/index.ts @@ -0,0 +1 @@ +export * from './ProductBox'; diff --git a/src/core/workbench/app/src/components/ProfileForm/FieldGroup.tsx b/src/core/workbench/app/src/components/ProfileForm/FieldGroup.tsx new file mode 100644 index 00000000..850528b5 --- /dev/null +++ b/src/core/workbench/app/src/components/ProfileForm/FieldGroup.tsx @@ -0,0 +1,22 @@ +import { Box, BoxProps, Text, useColorModeValue as mode } from '@chakra-ui/react'; +import * as React from 'react'; + +interface FieldGroupProps extends BoxProps { + title: string; + description: string; +} + +export const FieldGroup = (props: FieldGroupProps) => { + const { title, description, ...boxProps } = props; + return ( + + {title} + {description && ( + + {description} + + )} + + + ); +}; diff --git a/src/core/workbench/app/src/components/ProfileForm/HeadingGroup.tsx b/src/core/workbench/app/src/components/ProfileForm/HeadingGroup.tsx new file mode 100644 index 00000000..d96149e2 --- /dev/null +++ b/src/core/workbench/app/src/components/ProfileForm/HeadingGroup.tsx @@ -0,0 +1,19 @@ +import { Heading, Stack, StackProps, Text, useColorModeValue } from '@chakra-ui/react'; +import * as React from 'react'; + +interface HeadingGroupProps extends StackProps { + title: string; + description: string; +} + +export const HeadingGroup = (props: HeadingGroupProps) => { + const { title, description, ...stackProps } = props; + return ( + + + {title} + + {description} + + ); +}; diff --git a/src/core/workbench/app/src/components/ProfileForm/__tests__/FieldGroup.test.tsx b/src/core/workbench/app/src/components/ProfileForm/__tests__/FieldGroup.test.tsx new file mode 100644 index 00000000..f6ae8f41 --- /dev/null +++ b/src/core/workbench/app/src/components/ProfileForm/__tests__/FieldGroup.test.tsx @@ -0,0 +1,23 @@ +import { screen, rtlRender, waitFor } from '../../../test/test-utils'; +import { FieldGroup } 
from '../FieldGroup'; + +const title = 'Name'; +const description = 'Change account name'; + +jest.mock('@chakra-ui/react', () => { + const originalModule = jest.requireActual('@chakra-ui/react'); + return { + __esModule: true, + ...originalModule, + useBreakpointValue: jest.fn().mockImplementation(() => false), + }; +}); + +describe('FieldGroup Tests', () => { + test('should handle basic FieldGroup flow', async () => { + const fieldGroup = ; + rtlRender(fieldGroup); + expect(screen.getByText(title)).toBeInTheDocument(); + await waitFor(() => expect(document.title).toEqual('')); + }); +}); diff --git a/src/core/workbench/app/src/components/ProfileForm/__tests__/HeadingGroup.test.tsx b/src/core/workbench/app/src/components/ProfileForm/__tests__/HeadingGroup.test.tsx new file mode 100644 index 00000000..0a4f53e8 --- /dev/null +++ b/src/core/workbench/app/src/components/ProfileForm/__tests__/HeadingGroup.test.tsx @@ -0,0 +1,23 @@ +import { screen, rtlRender, waitFor } from '../../../test/test-utils'; +import { HeadingGroup } from '../HeadingGroup'; + +const title = 'Account Settings'; +const description = 'Change account details.'; + +jest.mock('@chakra-ui/react', () => { + const originalModule = jest.requireActual('@chakra-ui/react'); + return { + __esModule: true, + ...originalModule, + useBreakpointValue: jest.fn().mockImplementation(() => false), + }; +}); + +describe('HeadingGroup Tests', () => { + test('should handle basic HeadingGroup flow', async () => { + const headingGroup = ; + rtlRender(headingGroup); + expect(screen.getByText(title)).toBeInTheDocument(); + await waitFor(() => expect(document.title).toEqual('')); + }); +}); diff --git a/src/core/workbench/app/src/components/QuickLinks/QuickLink.tsx b/src/core/workbench/app/src/components/QuickLinks/QuickLink.tsx new file mode 100644 index 00000000..e970e439 --- /dev/null +++ b/src/core/workbench/app/src/components/QuickLinks/QuickLink.tsx @@ -0,0 +1,27 @@ +import { Button, Text } from '@chakra-ui/react'; + +export function QuickLink(props: any) { + function open(url: string) { + const newWindow = window.open(url, '_blank', 'noopener,noreferrer'); + if (newWindow) newWindow.opener = null; + } + + return ( + + ); +} diff --git a/src/core/workbench/app/src/components/QuickLinks/QuickLinks.tsx b/src/core/workbench/app/src/components/QuickLinks/QuickLinks.tsx new file mode 100644 index 00000000..f0a9c81a --- /dev/null +++ b/src/core/workbench/app/src/components/QuickLinks/QuickLinks.tsx @@ -0,0 +1,78 @@ +import { Box, Text, SimpleGrid } from '@chakra-ui/react'; +import { useContext } from 'react'; + +import { UserContext } from '../../context/UserContext'; +import { Datacoves } from '../Icons/Datacoves'; +import { DbtIcon } from '../Icons/Dbt'; +import { GithubIcon } from '../Icons/Github'; +import { GitlabIcon } from '../Icons/Gitlab'; +import { SqlIcon } from '../Icons/Sql'; +import { SupersetIcon } from '../Icons/Superset'; + +import { QuickLink } from './QuickLink'; + +export function QuickLinks() { + const { currentUser } = useContext(UserContext); + + return ( + + + Quick links + + + } + /> + } + /> + } + /> + {currentUser && currentUser.projects.find(Boolean)?.repository.provider === 'github' && ( + } + /> + )} + {currentUser && currentUser.projects.find(Boolean)?.repository.provider === 'gitlab' && ( + } + /> + )} + {currentUser && currentUser.projects.find(Boolean)?.repository.provider === 'bitbucket' && ( + } + /> + )} + {currentUser && + currentUser.permissions.find((permission) => + permission.includes('workbench:superset') 
+ ) && ( + } + /> + )} + + + ); +} diff --git a/src/core/workbench/app/src/components/QuickLinks/__tests__/QuickLink.test.tsx b/src/core/workbench/app/src/components/QuickLinks/__tests__/QuickLink.test.tsx new file mode 100644 index 00000000..404f0655 --- /dev/null +++ b/src/core/workbench/app/src/components/QuickLinks/__tests__/QuickLink.test.tsx @@ -0,0 +1,18 @@ +import { screen, rtlRender } from '../../../test/test-utils'; +import { QuickLink } from '../QuickLink'; + +jest.mock('@chakra-ui/react', () => { + const originalModule = jest.requireActual('@chakra-ui/react'); + return { + __esModule: true, + ...originalModule, + useBreakpointValue: jest.fn().mockImplementation(() => false), + }; +}); + +describe('QuickLinks Tests', () => { + test('should handle basic QuickLinks flow', () => { + rtlRender(); + expect(screen.getByText('text')).toBeInTheDocument(); + }); +}); diff --git a/src/core/workbench/app/src/components/QuickLinks/index.ts b/src/core/workbench/app/src/components/QuickLinks/index.ts new file mode 100644 index 00000000..96e60476 --- /dev/null +++ b/src/core/workbench/app/src/components/QuickLinks/index.ts @@ -0,0 +1,2 @@ +export * from './QuickLinks'; +export * from './QuickLink'; diff --git a/src/core/workbench/app/src/components/Sidebar/DesktopSidebar.tsx b/src/core/workbench/app/src/components/Sidebar/DesktopSidebar.tsx new file mode 100644 index 00000000..7a80cfab --- /dev/null +++ b/src/core/workbench/app/src/components/Sidebar/DesktopSidebar.tsx @@ -0,0 +1,88 @@ +import { Box, IconButton, Icon, Flex, useColorModeValue as mode } from '@chakra-ui/react'; +import { MouseEventHandler, ReactNode } from 'react'; +import { MdKeyboardArrowRight, MdKeyboardArrowLeft } from 'react-icons/md'; + +import { ExpandedSidebarContent } from './SidebarContent'; +import { MenuSection } from './utils'; + +interface DesktopSidebarContainerProps { + toggleSidebar: MouseEventHandler; + isCollapsed: boolean; + currentTab: string; + children: ReactNode; +} + +interface DesktopSidebarProps { + isCollapsed: boolean; + toggleSidebar: MouseEventHandler; + currentTab: string; + menu: MenuSection; + filteredMenu: string[]; +} + +const DesktopSidebarContainer = ({ + toggleSidebar, + isCollapsed, + currentTab, + children, +}: DesktopSidebarContainerProps) => ( + + + } + /> + + {children} + + +); + +const DesktopSidebar = ({ + isCollapsed, + toggleSidebar, + currentTab, + menu, + filteredMenu, +}: DesktopSidebarProps) => ( + + {!isCollapsed && } + +); + +export default DesktopSidebar; diff --git a/src/core/workbench/app/src/components/Sidebar/MobileSidebar.tsx b/src/core/workbench/app/src/components/Sidebar/MobileSidebar.tsx new file mode 100644 index 00000000..5a4bf6a9 --- /dev/null +++ b/src/core/workbench/app/src/components/Sidebar/MobileSidebar.tsx @@ -0,0 +1,47 @@ +import { Box } from '@chakra-ui/layout'; +import { + Drawer, + DrawerOverlay, + DrawerContent, + DrawerCloseButton, + DrawerBody, +} from '@chakra-ui/modal'; + +import { MobileHamburgerMenu } from '../Header/MobileHamburgerMenu'; +import { NavMenu } from '../Header/NavMenu'; + +import { ExpandedSidebarContent } from './SidebarContent'; +import { MenuSection } from './utils'; + +interface Props { + isWorkbench: boolean; + onClose: () => void; + isOpen: boolean; + dark: boolean; + menu: MenuSection; + filteredMenu: string[]; +} + +const MobileSidebar = ({ isWorkbench, onClose, isOpen, dark, menu, filteredMenu }: Props) => ( + <> + + + + {isWorkbench ? 
( + + ) : ( + + + + + + + + + + + )} + +); + +export default MobileSidebar; diff --git a/src/core/workbench/app/src/components/Sidebar/NavLink.tsx b/src/core/workbench/app/src/components/Sidebar/NavLink.tsx new file mode 100644 index 00000000..30669c6e --- /dev/null +++ b/src/core/workbench/app/src/components/Sidebar/NavLink.tsx @@ -0,0 +1,41 @@ +import { HStack, Icon, Link, LinkProps, useColorModeValue as mode, Text } from '@chakra-ui/react'; +import * as React from 'react'; +import { Link as ReactRouterLink } from 'react-router-dom'; + +interface NavLinkProps extends LinkProps { + isActive?: boolean; + label: string; + icon: any; +} + +export const NavLink = (props: NavLinkProps) => { + const { icon, isActive, label, href, ...rest } = props; + return ( + + + + {label} + + + ); +}; diff --git a/src/core/workbench/app/src/components/Sidebar/PageSidebarContainer.tsx b/src/core/workbench/app/src/components/Sidebar/PageSidebarContainer.tsx new file mode 100644 index 00000000..90eac6de --- /dev/null +++ b/src/core/workbench/app/src/components/Sidebar/PageSidebarContainer.tsx @@ -0,0 +1,19 @@ +import { Flex } from '@chakra-ui/layout'; +import { ReactNode } from 'react'; + +import Sidebar from './Sidebar'; + +interface Props { + children: ReactNode; +} + +const PageSidebarContainer = ({ children }: Props) => ( + + + + {children} + + +); + +export default PageSidebarContainer; diff --git a/src/core/workbench/app/src/components/Sidebar/Sidebar.tsx b/src/core/workbench/app/src/components/Sidebar/Sidebar.tsx new file mode 100644 index 00000000..ec8e61ba --- /dev/null +++ b/src/core/workbench/app/src/components/Sidebar/Sidebar.tsx @@ -0,0 +1,85 @@ +import { useBreakpointValue } from '@chakra-ui/react'; +import { MouseEvent, useContext, useMemo, useState } from 'react'; + +import { AccountContext } from '../../context/AccountContext'; +import { TabsContext } from '../../context/TabsContext'; +import { UserContext } from '../../context/UserContext'; +import { usePersistedState } from '../../hooks/usePersistedState'; +import { getSiteContext } from '../../utils/siteContext'; + +import DesktopSidebar from './DesktopSidebar'; +import MobileSidebar from './MobileSidebar'; +import { getAccountAdministrationMenu } from './utils'; + +export enum SidebarVariant { + Mobile = 'Mobile', + Desktop = 'Desktop', +} + +const Sidebar = () => { + const { env } = getSiteContext(); + const { currentUser } = useContext(UserContext); + const { currentAccount } = useContext(AccountContext); + const { currentTab } = useContext(TabsContext); + const [isCollapsed, setIsCollapsed] = usePersistedState('sidebarCollapsed', false); + const [isMobileSidebarOpen, setMobileSidebarOpen] = useState(false); + + const isWorkbench = useMemo(() => window.location.host.split('.')[0] === env, [env]); + + const sidebarVariant = + useBreakpointValue( + { base: SidebarVariant.Mobile, md: SidebarVariant.Desktop }, + { ssr: false } + ) || SidebarVariant.Desktop; + + const containsAdminPermissions = currentUser?.permissions.join(':').includes('admin'); + + const dark = ['docs', 'load', 'transform', 'observe', 'orchestrate'].includes(currentTab); + + const menu = getAccountAdministrationMenu(currentUser, currentAccount); + + // Do not show empty sections + const filteredMenu = Object.keys(menu).filter( + (key) => menu[key].filter(({ shouldRender }) => shouldRender).length + ); + + const shouldShowSidebar = + !!(filteredMenu.length && currentUser && containsAdminPermissions) && !isWorkbench; + + const shouldShowDesktopSidebar = sidebarVariant === 
SidebarVariant.Desktop && !isWorkbench; + + const toggleSidebar = (e: MouseEvent) => { + e.preventDefault(); + setIsCollapsed((prev: boolean) => !prev); + }; + + const toggleMobileSidebar = () => setMobileSidebarOpen((prevState) => !prevState); + + return ( + <> + {shouldShowSidebar && + (shouldShowDesktopSidebar ? ( + + ) : ( + sidebarVariant === SidebarVariant.Mobile && ( + + ) + ))} + + ); +}; + +export default Sidebar; diff --git a/src/core/workbench/app/src/components/Sidebar/SidebarContent.tsx b/src/core/workbench/app/src/components/Sidebar/SidebarContent.tsx new file mode 100644 index 00000000..5496a173 --- /dev/null +++ b/src/core/workbench/app/src/components/Sidebar/SidebarContent.tsx @@ -0,0 +1,55 @@ +import { + Accordion, + AccordionItem, + AccordionButton, + AccordionIcon, + AccordionPanel, + Heading, + Text, +} from '@chakra-ui/react'; +import { useLocation } from 'react-router'; + +import { NavLink } from './NavLink'; +import { MenuSection } from './utils'; + +interface ExpandedProps { + menuItems: MenuSection; + filteredMenuItems: string[]; +} + +export const ExpandedSidebarContent = ({ menuItems, filteredMenuItems }: ExpandedProps) => { + const itemNumber = Object.keys(menuItems).length; + const { pathname } = useLocation(); + return ( + <> + + Account Administration + + + {filteredMenuItems.map((key) => ( + + + + + {key.toUpperCase()} + + + + {menuItems[key] + .filter(({ shouldRender }) => shouldRender) + .map(({ heading, navigateTo, icon }) => ( + = 0} + key={heading} + /> + ))} + + + ))} + + + ); +}; diff --git a/src/core/workbench/app/src/components/Sidebar/index.ts b/src/core/workbench/app/src/components/Sidebar/index.ts new file mode 100644 index 00000000..c167c49f --- /dev/null +++ b/src/core/workbench/app/src/components/Sidebar/index.ts @@ -0,0 +1 @@ +export * from './Sidebar'; diff --git a/src/core/workbench/app/src/components/Sidebar/utils.ts b/src/core/workbench/app/src/components/Sidebar/utils.ts new file mode 100644 index 00000000..2b62fb60 --- /dev/null +++ b/src/core/workbench/app/src/components/Sidebar/utils.ts @@ -0,0 +1,177 @@ +import { BsEnvelope } from 'react-icons/bs'; +import { FaConnectdevelop } from 'react-icons/fa'; +import { HiOutlineKey, HiOutlineUserGroup } from 'react-icons/hi'; +import { IconType } from 'react-icons/lib'; +import { MdAttachMoney, MdCable, MdOutlineDashboard, MdOutlineLockOpen } from 'react-icons/md'; +import { RiHomeGearLine, RiPlugLine, RiUser3Line, RiUserSettingsLine } from 'react-icons/ri'; + +import { Account } from '../../context/AccountContext/types'; +import { User } from '../../context/UserContext/types'; + +export interface MenuSection { + [key: string]: AccountItem[]; +} + +export interface AccountItem { + shouldRender: boolean; + heading: string; + text: string; + navigateTo: string; + icon: IconType; +} + +export const getAccountAdministrationMenu = ( + currentUser: User | undefined, + currentAccount: Account | undefined +): MenuSection => ({ + 'Users & Groups': [ + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_groups && + currentUser.permissions.find((permission) => permission.includes('admin:groups|write')) + ), + heading: 'Groups', + text: 'Add groups and manage permissions', + navigateTo: '/admin/groups', + icon: MdOutlineLockOpen, + }, + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_invitations && + currentUser.permissions.find((permission) => permission.includes('admin:invitations|write')) + ), + heading: 'Invitations', + text: 'Invite users and manage access', + 
navigateTo: '/admin/invitations', + icon: BsEnvelope, + }, + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_users && + currentUser.permissions.find((permission) => permission.includes('admin:users|write')) + ), + heading: 'Users', + text: 'Add users and assign groups', + navigateTo: '/admin/users', + icon: RiUser3Line, + }, + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_profiles && + currentUser.permissions.find((permission) => permission.includes('admin:profiles|write')) + ), + heading: 'Profiles', + text: 'Configure and manage profiles', + navigateTo: '/admin/profiles', + icon: HiOutlineUserGroup, + }, + ], + 'Projects & Environments': [ + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_projects && + currentUser.permissions.find((permission) => permission.includes('admin:projects|write')) + ), + heading: 'Projects', + text: 'Add and manage projects', + navigateTo: '/admin/projects', + icon: MdOutlineDashboard, + }, + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_environments && + currentUser.permissions.find((permission) => + permission.includes('admin:environments|write') + ) + ), + heading: 'Environments', + text: 'Configure and manage environments', + navigateTo: '/admin/environments', + icon: RiHomeGearLine, + }, + ], + Credentials: [ + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_connections && + currentUser.permissions.find((permission) => + permission.includes('admin:connectiontemplates|write') + ) + ), + heading: 'Connection templates', + text: 'Configure default connection templates', + navigateTo: '/admin/connection-templates', + icon: MdCable, + }, + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_service_credentials && + currentUser.permissions.find((permission) => + permission.includes('admin:servicecredentials|write') + ) + ), + heading: 'Service connections', + text: 'Configure services connections', + navigateTo: '/admin/service-connections', + icon: RiPlugLine, + }, + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_integrations && + currentUser.permissions.find((permission) => + permission.includes('admin:integrations|write') + ) + ), + heading: 'Integrations', + text: 'Configure integrations', + navigateTo: '/admin/integrations', + icon: FaConnectdevelop, + }, + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_secrets && + currentUser.permissions.find((permission) => permission.includes('admin:secrets|write')) + ), + heading: 'Secrets', + text: 'Configure secrets', + navigateTo: '/admin/secrets', + icon: HiOutlineKey, + }, + ], + 'Account & Billing': [ + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_account && + currentAccount?.owned_by === currentUser.email && + !currentUser?.features.accounts_signup + ), + heading: 'Account settings', + text: 'Change account global settings', + navigateTo: '/admin/account', + icon: RiUserSettingsLine, + }, + { + shouldRender: !!( + currentUser && + currentUser?.features.admin_account && + currentAccount?.owned_by === currentUser.email && + currentUser?.features.accounts_signup + ), + heading: 'Account settings & billing', + text: 'Change account billing and settings', + navigateTo: '/admin/account', + icon: MdAttachMoney, + }, + ], +}); diff --git a/src/core/workbench/app/src/components/TagInput.tsx b/src/core/workbench/app/src/components/TagInput.tsx new file mode 100644 index 00000000..ccc8ba89 --- /dev/null +++ 
b/src/core/workbench/app/src/components/TagInput.tsx @@ -0,0 +1,97 @@ +import { Input } from '@chakra-ui/input'; +import { Box, Flex } from '@chakra-ui/layout'; +import { Button, HStack } from '@chakra-ui/react'; +import { Tag, TagCloseButton, TagLabel } from '@chakra-ui/tag'; +import { useState, FC, useEffect } from 'react'; + +interface TagInputProps { + placeholder: string; + data?: string[]; +} + +export const TagInput: FC = ({ data, placeholder }) => { + const [input, setInput] = useState(''); + const [tags, setTags] = useState([]); + const [isKeyReleased, setIsKeyReleased] = useState(false); + + useEffect(() => { + if (data !== undefined) setTags(data); + }, [data]); + + const onChange = (e: any) => { + const { value } = e.target; + setInput(value); + }; + + const handlerClick = () => { + const trimmedInput = input.trim(); + if (trimmedInput.length && !tags.includes(trimmedInput)) { + setTags((prevState) => [...prevState, trimmedInput]); + setInput(''); + } + }; + + const onKeyDown = (e: any) => { + const { key } = e; + const trimmedInput = input.trim(); + + if ((key === ',' || key === 'Enter') && trimmedInput.length && !tags.includes(trimmedInput)) { + e.preventDefault(); + setTags((prevState) => [...prevState, trimmedInput]); + setInput(''); + } + + if (key === 'Backspace' && !input.length && tags.length && isKeyReleased) { + const tagsCopy: string[] = [...tags]; + const poppedTag: string = tagsCopy.pop() || ''; + e.preventDefault(); + setTags(tagsCopy); + setInput(poppedTag); + } + setIsKeyReleased(false); + }; + + const onKeyUp = () => { + setIsKeyReleased(true); + }; + + const deleteTag = (index: number) => { + setTags((prevState) => prevState.filter((tag, i) => i !== index)); + }; + + return ( + + + + + {/* */} + + + + {tags.map((tag, index) => ( + + {tag} + deleteTag(index)} /> + + ))} + + + ); +}; diff --git a/src/core/workbench/app/src/components/TestButton.tsx b/src/core/workbench/app/src/components/TestButton.tsx new file mode 100644 index 00000000..3cca7e4b --- /dev/null +++ b/src/core/workbench/app/src/components/TestButton.tsx @@ -0,0 +1,36 @@ +import { CheckIcon } from '@chakra-ui/icons'; +import { Box, Button, ButtonProps, Tooltip } from '@chakra-ui/react'; +import { formatRelative, parseISO } from 'date-fns'; +import { MdFlashOn } from 'react-icons/md'; + +interface TestButtonProps extends ButtonProps { + testType: string; + validatedAt: string | undefined; +} + +export const TestButton = ({ testType, validatedAt, ...buttonProps }: TestButtonProps) => { + const tested = !!validatedAt; + return ( + + + + ); +}; diff --git a/src/core/workbench/app/src/components/UserGroups/AllEnvGroups.tsx b/src/core/workbench/app/src/components/UserGroups/AllEnvGroups.tsx new file mode 100644 index 00000000..8c86a061 --- /dev/null +++ b/src/core/workbench/app/src/components/UserGroups/AllEnvGroups.tsx @@ -0,0 +1,30 @@ +import { Heading, SimpleGrid } from '@chakra-ui/react'; + +import { Group } from '../../features/admin/groups/types'; + +import { GroupCheckboxItem } from './CheckboxItem'; +import { ProjectGroupType } from './utils'; + +interface Props { + projectsGroup: ProjectGroupType; + projectKey: string; +} + +const AllEnvGroups = ({ projectsGroup, projectKey }: Props) => { + const { permissions: projects } = projectsGroup[projectKey]; + + return ( + <> + + {'All environments'} + + + {projects.map((group: Group) => ( + + ))} + + + ); +}; + +export default AllEnvGroups; diff --git a/src/core/workbench/app/src/components/UserGroups/CheckboxItem.tsx 
b/src/core/workbench/app/src/components/UserGroups/CheckboxItem.tsx new file mode 100644 index 00000000..332b1334 --- /dev/null +++ b/src/core/workbench/app/src/components/UserGroups/CheckboxItem.tsx @@ -0,0 +1,71 @@ +import { QuestionOutlineIcon } from '@chakra-ui/icons'; +import { HStack, Tooltip, GridItemProps, GridItem } from '@chakra-ui/react'; +import { CheckboxControl } from 'formik-chakra-ui'; +import { ChangeEvent } from 'react'; + +import { Group } from '../../features/admin/groups/types'; + +import { generalGroups, labelGroup } from './utils'; + +interface Props { + name: string; + value: string; + label: string; + description: string; + onChange?: (e: ChangeEvent) => void; + styleProps?: GridItemProps; + isDisabled?: boolean; +} + +interface GroupProps { + group: Group; + onChange?: (e: ChangeEvent) => void; + isDisabled?: boolean; +} + +const CheckboxItem = ({ + name, + value, + label, + description, + onChange, + styleProps, + isDisabled, +}: Props) => { + return ( + + + + {label} + + + + + + + ); +}; + +export const GroupCheckboxItem = ({ group, onChange, isDisabled }: GroupProps) => { + const label = labelGroup(group.extended_group.name); + return ( + + ); +}; + +export default CheckboxItem; diff --git a/src/core/workbench/app/src/components/UserGroups/SingleEnvGroups.tsx b/src/core/workbench/app/src/components/UserGroups/SingleEnvGroups.tsx new file mode 100644 index 00000000..79de098e --- /dev/null +++ b/src/core/workbench/app/src/components/UserGroups/SingleEnvGroups.tsx @@ -0,0 +1,48 @@ +import { Box, Heading, SimpleGrid } from '@chakra-ui/layout'; + +import { Group } from '../../features/admin/groups/types'; + +import { GroupCheckboxItem } from './CheckboxItem'; +import { GroupFormValues, labelGroup, ProjectGroupType } from './utils'; + +interface Props { + projectsGroup: ProjectGroupType; + projectKey: string; + values: GroupFormValues; +} + +const SingleEnvGroups = ({ projectsGroup, projectKey, values }: Props) => { + const { environments: projectEnvs, permissions: projects } = projectsGroup[projectKey]; + + const selectedProjectGroups = projects + .filter(({ id }) => values.groups.includes(id.toString())) + .map((group) => labelGroup(group.extended_group.name)); + + const isProjectGroupSelected = (group: Group) => + selectedProjectGroups.includes(labelGroup(group.extended_group.name)); + + return ( + <> + {Object.keys(projectEnvs)?.map((key) => { + return ( + + + {`Only ${key}`} + + + {projectEnvs[key].map((group: Group) => ( + + ))} + + + ); + })} + + ); +}; + +export default SingleEnvGroups; diff --git a/src/core/workbench/app/src/components/UserGroups/UserGroups.tsx b/src/core/workbench/app/src/components/UserGroups/UserGroups.tsx new file mode 100644 index 00000000..182296b0 --- /dev/null +++ b/src/core/workbench/app/src/components/UserGroups/UserGroups.tsx @@ -0,0 +1,67 @@ +import { + Accordion, + AccordionButton, + AccordionIcon, + AccordionItem, + AccordionPanel, + Box, + Heading, + SimpleGrid, +} from '@chakra-ui/react'; +import React from 'react'; + +import { Group } from '../../features/admin/groups/types'; + +import AllEnvGroups from './AllEnvGroups'; +import { GroupCheckboxItem } from './CheckboxItem'; +import SingleEnvGroups from './SingleEnvGroups'; +import { GroupFormValues, ProjectGroupType } from './utils'; + +interface Props { + projectsGroup: ProjectGroupType; + accountGroup: Group[] | undefined; + values: GroupFormValues; + accountName: string | undefined; +} + +export const UserGroups = ({ projectsGroup, accountGroup, values, accountName }: 
Props) => { + return ( + + + {`${accountName} Account`} + + + {accountGroup?.map((group: Group) => ( + + ))} + + + + {Object.keys(projectsGroup).map((project) => { + return ( + +

+ + + {`${project} Project`} + + + +

+ + + + +
+ ); + })} +
+
+ ); +}; + +export default UserGroups; diff --git a/src/core/workbench/app/src/components/UserGroups/index.ts b/src/core/workbench/app/src/components/UserGroups/index.ts new file mode 100644 index 00000000..6fe89016 --- /dev/null +++ b/src/core/workbench/app/src/components/UserGroups/index.ts @@ -0,0 +1 @@ +export * from './UserGroups'; diff --git a/src/core/workbench/app/src/components/UserGroups/utils.ts b/src/core/workbench/app/src/components/UserGroups/utils.ts new file mode 100644 index 00000000..f5ce8ea1 --- /dev/null +++ b/src/core/workbench/app/src/components/UserGroups/utils.ts @@ -0,0 +1,77 @@ +import { groupBy } from 'lodash'; + +import { Group } from '../../features/admin/groups/types'; + +export interface GroupFormValues { + name: string; + email: string; + groups: string[]; +} + +export type ProjectGroupType = Record< + string, + { permissions: Group[]; environments: { [key: string]: Group[] } } +>; + +export const generalGroups: Record = { + Developer: 'Developer', + SysAdmin: 'Sys Admin', + Viewer: 'Viewer', + AccountAdmin: 'Account Admin', + AccountDefault: 'Account Default', +}; + +export const getProjectsList = (groups: Group[] | undefined) => + (groups || [])?.filter( + ({ extended_group }) => !!extended_group.project && !extended_group.environment + ); + +export const getEnvironmentList = (groups: Group[] | undefined) => + (groups || [])?.filter(({ extended_group }) => !!extended_group.environment); + +export const addProjectsIntoProjectGroup = (groups: Group[] | undefined) => { + const projects = groupBy(getProjectsList(groups), 'extended_group.project.name'); + const grouppedProjects: ProjectGroupType = {}; + Object.keys(projects).forEach((key) => { + grouppedProjects[key] = { + permissions: projects[key], + environments: { + ...groupBy( + getEnvironmentList(groups).filter( + ({ extended_group }) => + extended_group.environment?.project === projects[key][0].extended_group.project?.id + ), + (group) => createGroupName(group) + ), + }, + }; + }); + + return grouppedProjects; +}; + +export const labelGroup = (group: string): string => { + if (group.endsWith('Developer')) { + return generalGroups.Developer; + } + if (group.endsWith('Sys Admin')) { + return generalGroups.SysAdmin; + } + if (group.endsWith('Viewer')) { + return generalGroups.Viewer; + } + if (group.endsWith('Account Admin')) { + return generalGroups.AccountAdmin; + } + if (group.endsWith('Account Default')) { + return generalGroups.AccountDefault; + } + return group; +}; + +export const createGroupName = (group: Group) => { + if (group.extended_group.environment) + return `${group.extended_group.environment.name} (${group.extended_group.environment.slug})`; + if (group.extended_group.project) return group.extended_group.project.name; + return group.name; +}; diff --git a/src/core/workbench/app/src/components/WorkbenchTab/WorkbenchTab.tsx b/src/core/workbench/app/src/components/WorkbenchTab/WorkbenchTab.tsx new file mode 100644 index 00000000..351485cb --- /dev/null +++ b/src/core/workbench/app/src/components/WorkbenchTab/WorkbenchTab.tsx @@ -0,0 +1,43 @@ +import { Box } from '@chakra-ui/react'; +import React, { useEffect, useContext, useState } from 'react'; + +import { TabsContext } from '../../context/TabsContext'; + +export function WorkbenchTab(props: any) { + const { currentTab } = useContext(TabsContext); + const [loaded, setLoaded] = useState(false); + + useEffect(() => { + if (props.name === currentTab) { + if (!props.isLoading && !loaded) { + setLoaded(true); + } + if (props.isLoading && loaded) { 
+ setLoaded(false); + } + } + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [currentTab, props]); + + return ( + + {props.sidebar} + {loaded && ( + + )} + + ); +} diff --git a/src/core/workbench/app/src/components/WorkbenchTab/index.ts b/src/core/workbench/app/src/components/WorkbenchTab/index.ts new file mode 100644 index 00000000..faebbc90 --- /dev/null +++ b/src/core/workbench/app/src/components/WorkbenchTab/index.ts @@ -0,0 +1 @@ +export * from './WorkbenchTab'; diff --git a/src/core/workbench/app/src/components/utils.tsx b/src/core/workbench/app/src/components/utils.tsx new file mode 100644 index 00000000..ff072348 --- /dev/null +++ b/src/core/workbench/app/src/components/utils.tsx @@ -0,0 +1,93 @@ +import { Code } from '@chakra-ui/react'; + +export const TIPS = [ + { service: 'VS Code', tip: 'Drag and drop tabs in the editor to rearrange them' }, + { service: 'VS Code', tip: 'Use `Cmd/Ctrl` + ``` to open the terminal' }, + { service: 'VS Code', tip: 'Use `Cmd/Ctrl` + `shift` + ``` to open a new terminal.' }, + { + service: 'VS Code', + tip: 'Use the `Cmd/Ctrl` + `Enter` shortcut to preview Common Table Expressions (CTEs).', + }, + { + service: 'VS Code', + tip: 'Use the Format SQL button in the status bar to quickly format your file using sqlfluff', + }, + { service: 'VS Code', tip: 'Quickly close models with `Cmd/Ctrl` + `Option/Alt` + `W`' }, + { + service: 'VS Code', + tip: '`git br` is the alias for `git branch`. Use `git br` to see available branches.', + }, + { + service: 'VS Code', + tip: '`git co` is the alias for `git checkout`. Quickly switch to another branch with `git co `.', + }, + { + service: 'VS Code', + tip: '`git l` is the alias for `git log`. View the commit log with `git l`.', + }, + { + service: 'VS Code', + tip: '`git st` is the alias for `git status`. Check the Git status of your repository using `git st`.', + }, + { + service: 'VS Code', + tip: 'Use `git po` to pull changes from the main branch into your local branch', + }, + { + service: 'VS Code', + tip: 'Use `git prune-branches` to delete local branches that have been deleted on the remote server.', + }, + { + service: 'VS Code', + tip: 'Use `dbt-coves generate airflow-dags —` to generate your Python DAGs with YAML', + }, + { + service: 'VS Code', + tip: 'One-to-one schema:model relationships improve dbt project organization.', + }, + { + service: 'VS Code', + tip: 'Use `dbt-coves generate sources` to dynamically generate your source files and models.', + }, + { + service: 'VS Code', + tip: "Use autocomplete for 'ref' and 'source' commands. Simply start typing `ref` or `source`, then press 'Tab' to quickly insert them.", + }, + { + service: 'VS Code', + tip: 'No need to switch windows for Snowflake. Use the Snowflake extension to simplify your workflow. You can preview data, query the data, make use of autocomplete and more right from your IDE!', + }, + { + service: 'Datacoves Docs', + tip: 'No need to leave the platform to search for our documentation. Use the Docs tab to see the official Datacoves documentation.', + }, + { service: 'DBT Docs', tip: 'View local and production DBT docs under the Observe tab' }, + { + service: 'VS Code', + tip: 'Use the Datacoves Power User extension to see/run tests, parent or child models. Click on the extension to bring up the pane. ', + }, + { + service: 'VS Code', + tip: "Don't worry about always needing to hit `Cmd/Ctrl` + `S`.
The VS Code browser autosaves your work.", + }, + { + service: 'VS Code', + tip: 'Run `dbt docs generate` then click the observe tab > local docs to preview documentation changes based on your current branch.', + }, +]; + +const regex = { + code: /(`.*?`)/g, +}; + +export const replaceJSX = (text: string) => { + return text.split(regex.code).map((str, index) => + str.search(regex.code) !== -1 ? ( + + {str.substr(1, str.length - 2)} + + ) : ( + str + ) + ); +}; diff --git a/src/core/workbench/app/src/config/__mocks__/dom.js b/src/core/workbench/app/src/config/__mocks__/dom.js new file mode 100644 index 00000000..5650dc25 --- /dev/null +++ b/src/core/workbench/app/src/config/__mocks__/dom.js @@ -0,0 +1,5 @@ +import { JSDOM } from 'jsdom'; +const dom = new JSDOM(); +global.document = dom.window.document; +global.location.hostname = "dev123.datacoveslocal.com"; +global.window = dom.window; diff --git a/src/core/workbench/app/src/config/index.ts b/src/core/workbench/app/src/config/index.ts new file mode 100644 index 00000000..ee5be681 --- /dev/null +++ b/src/core/workbench/app/src/config/index.ts @@ -0,0 +1,13 @@ +const regex = /^[^.]+/; +const suffix = location.hostname.match(regex)?.shift(); +export const IS_WORKBENCH = suffix && suffix.length === 6; +export const ENV_SLUG = IS_WORKBENCH ? suffix : undefined; +export const HOST = location.hostname; + +const apiUrl = IS_WORKBENCH ? location.hostname.replace(regex, 'api') : 'api.' + location.hostname; +export const API_URL = `https://${apiUrl}`; +export const WS_URL = `wss://${apiUrl}`; + +export const LAUNCHPAD_HOST = IS_WORKBENCH + ? location.hostname.replace(/^[^.]+\./, '') + : location.hostname; diff --git a/src/core/workbench/app/src/context/AccountContext/AccountContext.tsx b/src/core/workbench/app/src/context/AccountContext/AccountContext.tsx new file mode 100644 index 00000000..73381834 --- /dev/null +++ b/src/core/workbench/app/src/context/AccountContext/AccountContext.tsx @@ -0,0 +1,12 @@ +import { createContext } from 'react'; + +import { Account, IAccountContext } from './types'; + +export const AccountContext = createContext({ + currentAccount: undefined, + // eslint-disable-next-line @typescript-eslint/no-unused-vars + setCurrentAccount: (account: Account | undefined) => {}, + accounts: [], + // eslint-disable-next-line @typescript-eslint/no-unused-vars + setAccounts: (accounts: Account[]) => {}, +}); diff --git a/src/core/workbench/app/src/context/AccountContext/api/getUserAccounts.ts b/src/core/workbench/app/src/context/AccountContext/api/getUserAccounts.ts new file mode 100644 index 00000000..fac0984d --- /dev/null +++ b/src/core/workbench/app/src/context/AccountContext/api/getUserAccounts.ts @@ -0,0 +1,12 @@ +import { useQuery } from 'react-query'; + +import { axios } from '../../../lib/axios'; +import { Account } from '../types'; + +export const getUserAccounts = (): Promise => { + return axios.get('api/iam/accounts'); +}; + +export const useGetUserAccounts = (options?: any) => { + return useQuery(['getUserAccounts'], async () => await getUserAccounts(), options); +}; diff --git a/src/core/workbench/app/src/context/AccountContext/index.ts b/src/core/workbench/app/src/context/AccountContext/index.ts new file mode 100644 index 00000000..54a0de92 --- /dev/null +++ b/src/core/workbench/app/src/context/AccountContext/index.ts @@ -0,0 +1 @@ +export * from './AccountContext'; diff --git a/src/core/workbench/app/src/context/AccountContext/types/index.ts b/src/core/workbench/app/src/context/AccountContext/types/index.ts new file mode 
100644 index 00000000..346fd9f3 --- /dev/null +++ b/src/core/workbench/app/src/context/AccountContext/types/index.ts @@ -0,0 +1,26 @@ +export type Plan = { + name: string; + slug: string; + billing_period: string; + trial_period_days: number; + kind: string; +}; + +export type Account = { + name: string; + slug: string; + owned_by: string; + subscription_id?: string; + plan?: Plan; + remaining_trial_days: number; + trial_ends_at?: string; + has_environments: boolean; + is_suspended: boolean; +}; + +export interface IAccountContext { + currentAccount: Account | undefined; + setCurrentAccount: (account: Account | undefined) => void; + accounts: Account[] | undefined; + setAccounts: (accounts: Account[]) => void; +} diff --git a/src/core/workbench/app/src/context/ObserveSubTabsContext/ObserveSubTabsContext.tsx b/src/core/workbench/app/src/context/ObserveSubTabsContext/ObserveSubTabsContext.tsx new file mode 100644 index 00000000..50c1e1ce --- /dev/null +++ b/src/core/workbench/app/src/context/ObserveSubTabsContext/ObserveSubTabsContext.tsx @@ -0,0 +1,7 @@ +import { createContext } from 'react'; + +export const ObserveSubTabsContext = createContext({ + currentSubTab: '', + // eslint-disable-next-line @typescript-eslint/no-unused-vars + setCurrentSubTab: (subTabId: string) => {}, +}); diff --git a/src/core/workbench/app/src/context/ObserveSubTabsContext/index.ts b/src/core/workbench/app/src/context/ObserveSubTabsContext/index.ts new file mode 100644 index 00000000..65ea9cde --- /dev/null +++ b/src/core/workbench/app/src/context/ObserveSubTabsContext/index.ts @@ -0,0 +1 @@ +export * from './ObserveSubTabsContext'; diff --git a/src/core/workbench/app/src/context/TabsContext/TabsContext.tsx b/src/core/workbench/app/src/context/TabsContext/TabsContext.tsx new file mode 100644 index 00000000..522f4e68 --- /dev/null +++ b/src/core/workbench/app/src/context/TabsContext/TabsContext.tsx @@ -0,0 +1,7 @@ +import { createContext } from 'react'; + +export const TabsContext = createContext({ + currentTab: '', + // eslint-disable-next-line @typescript-eslint/no-unused-vars + setCurrentTab: (tabId: string) => {}, +}); diff --git a/src/core/workbench/app/src/context/TabsContext/index.ts b/src/core/workbench/app/src/context/TabsContext/index.ts new file mode 100644 index 00000000..affda74e --- /dev/null +++ b/src/core/workbench/app/src/context/TabsContext/index.ts @@ -0,0 +1 @@ +export * from './TabsContext'; diff --git a/src/core/workbench/app/src/context/UIProvider.tsx b/src/core/workbench/app/src/context/UIProvider.tsx new file mode 100644 index 00000000..b7e4b44e --- /dev/null +++ b/src/core/workbench/app/src/context/UIProvider.tsx @@ -0,0 +1,278 @@ +import { ChakraProvider, useToast } from '@chakra-ui/react'; +import * as Sentry from '@sentry/react'; +import React, { useState, useEffect, useRef, createElement } from 'react'; +import { useErrorHandler } from 'react-error-boundary'; +import { BrowserRouter as Router } from 'react-router-dom'; + +import ExpandibleToast from '../components/ExpandibleToast/ExpandibleToast'; +import { API_URL, WS_URL } from '../config'; +import { useGetUserAccounts } from '../context/AccountContext/api/getUserAccounts'; +import { Account } from '../context/AccountContext/types'; +import { useGetUserInfo } from '../context/UserContext/api/getUserInfo'; +import { User } from '../context/UserContext/types'; +import { WebSocketContext } from '../features/global/websocket/WebSocketContext'; +import { usePersistedState } from '../hooks/usePersistedState'; +import { main } from 
'../themes'; +import { getSiteContext } from '../utils/siteContext'; + +import { AccountContext } from './AccountContext'; +import { ObserveSubTabsContext } from './ObserveSubTabsContext'; +import { TabsContext } from './TabsContext'; +import { HasTabAccess, UserContext } from './UserContext'; + +type AppProviderProps = { + children: React.ReactNode; +}; + +const isPublicPage = () => { + // Public pages should not retrieve data from the API + return window.location.pathname === '/sign-in'; +}; + +export const UIProvider = ({ children }: AppProviderProps) => { + const toast = useToast(); + const [currentUser, setCurrentUser] = useState(); + const [currentTab, setCurrentTab] = usePersistedState('currentTab', 'docs'); + const [currentObserveSubTab, setCurrentObserveSubTab] = usePersistedState( + 'currentObserveSubTab', + 'local-dbt-docs' + ); + const handleError = useErrorHandler(); + const [currentAccount, setAccount] = useState(); + const [accountSlug, setAccountSlug] = usePersistedState('accountSlug', undefined); + const [accounts, setAccounts] = useState(); + const [webSocket, setWebSocket] = useState(); + const [webSocketFailed, setWebSocketFailed] = useState(false); + const [isWebSocketReady, setWebSocketReady] = useState(false); + const [envStatusMessages, setEnvStatusMessages] = useState([]); + const [socketMessages, setSocketMessages] = useState([]); + const siteContext = getSiteContext(); + const socketAttemptsConnection = useRef(1); + const webSocketUrl = useRef(''); + + const redirectOnError = (err: any) => { + if (err.response && [401, 403].includes(err.response.status)) { + window.location.href = `${API_URL}/iam/login?next=${window.location}`; + } else { + handleError(err); + } + }; + + const datacovesSocket = () => { + if (webSocket && webSocket.readyState == WebSocket.OPEN) { + webSocket.close(); + } + + const socket = new WebSocket(webSocketUrl.current); + socket.onopen = () => { + console.log('[Websocket] opened.'); + setWebSocket(socket); + setWebSocketReady(true); + setWebSocketFailed(false); + socketAttemptsConnection.current = 1; + }; + + socket.onclose = (event) => { + if (!event.wasClean && socketAttemptsConnection.current) { + socketAttemptsConnection.current++; + setTimeout(() => datacovesSocket(), 1000); + } else { + console.log('[Websocket] closed.'); + } + + setWebSocketReady(false); + }; + + socket.onerror = (error) => { + console.error('[Websocket]', error); + socket.close(); + setWebSocketFailed(true); + }; + + socket.onmessage = (event) => { + const receivedMessage = JSON.parse(event.data); + const messageType = receivedMessage['message_type']; + const message = receivedMessage['message']; + + // Store websocket messages in state + setSocketMessages((messages) => { + const socketMessages = messages.filter((msg: any) => msg.message_type !== messageType); + return [...socketMessages, receivedMessage]; + }); + + switch (messageType) { + case 'user.toast': { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: message['content'], + extra: message['extra'], + status: message['status'], + }); + }, + isClosable: true, + }); + break; + } + case 'env.heartbeat': { + if (!message['code_server_active']) { + /* + We send this notification only once when the code server pod is not running, + since the startup process takes a bit longer. + */ + setTimeout(() => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Workspace loading', + extra: + 'We are setting up your VS-Code.
You will be able to access it in a few seconds.', + status: 'info', + }); + }, + isClosable: true, + position: 'bottom', + duration: 7000, + }); + }, 2000); // 2 seconds + } + break; + } + case 'env.status': + setEnvStatusMessages((messages) => { + const updatedMessages = messages.filter((msg: any) => msg.env !== message.env); + return [...updatedMessages, message]; + }); + break; + + default: + console.log('Unknown websocket message:', receivedMessage); + } + }; + }; + + useEffect(() => { + if (currentAccount) { + const wsUrl = `${WS_URL}/ws/account/${currentAccount.slug}/`; + if (webSocketUrl.current !== wsUrl) { + webSocketUrl.current = wsUrl; + datacovesSocket(); + } + } + + // eslint-disable-next-line react-hooks/exhaustive-deps + }, [currentAccount]); + + const sendMessageBySocket = async (message: object) => { + waitForSocketConnection(() => webSocket?.send(JSON.stringify(message))); + }; + + const waitForSocketConnection = (callback: () => void, attempts = 20) => { + if (webSocket?.readyState === WebSocket.OPEN) { + callback(); + } else if (attempts > 0) { + setTimeout(() => waitForSocketConnection(callback, attempts - 1), 1000); + } else { + console.error('[Websocket] Could not establish connection.'); + } + }; + + const getEnvStatus = (envSlug: string) => { + const items = envStatusMessages.filter((item) => Object.assign(item).env === envSlug); + return items.length > 0 ? items[0] : {}; + }; + + const getSocketMessage = (messageType: string) => { + const items = socketMessages.filter((item) => Object.assign(item).message_type === messageType); + const message = items.length > 0 ? items[0] : null; + return message; + }; + + useGetUserInfo( + { + account: currentAccount?.slug, + environment: siteContext.env, + }, + { + onSuccess: (resp: User) => { + setCurrentUser(resp); + // Sentry tags + Sentry.setTag('user.slug', resp.slug); + Sentry.setTag('account.slug', currentAccount?.slug); + // Google Analytics properties + if (window.gtag) { + window.gtag('config', 'G-X8V16WM99D', { + user_id: resp.slug, + }); + window.gtag('set', 'user_properties', { + user_slug: resp.slug, + account_slug: currentAccount?.slug, + }); + } + }, + onError: redirectOnError, + enabled: !isPublicPage() && !!accounts, + } + ); + + useGetUserAccounts({ + onSuccess: (accounts: Account[]) => { + setAccounts(accounts); + if (accounts.length > 0) { + if (accountSlug) { + const storedAccount = accounts.filter((acc) => acc.slug === accountSlug); + if (storedAccount.length > 0) { + setAccount(storedAccount[0]); + } else { + setAccount(accounts[0]); + } + } else { + setAccount(accounts[0]); + } + } + }, + onError: redirectOnError, + enabled: !isPublicPage(), + }); + + useEffect(() => { + if (currentUser && !HasTabAccess(currentUser, currentTab)) { + setCurrentTab('docs'); + } + }, [currentTab, currentUser, setCurrentTab]); + + function setCurrentAccount(account: Account | undefined) { + setAccount(account); + setAccountSlug(account?.slug); + } + + return ( + + + + + + + {children} + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/context/UserContext/UserContext.tsx b/src/core/workbench/app/src/context/UserContext/UserContext.tsx new file mode 100644 index 00000000..120a7b9f --- /dev/null +++ b/src/core/workbench/app/src/context/UserContext/UserContext.tsx @@ -0,0 +1,83 @@ +import { createContext } from 'react'; + +import { getSiteContext } from '../../utils/siteContext'; + +import { User, IUserContext } from './types'; + +export const UserContext = createContext({ + currentUser: undefined, + // 
eslint-disable-next-line @typescript-eslint/no-unused-vars + setCurrentUser: (user: User | undefined) => {}, +}); + +export const tabPermissions: Record = { + docs: [], + load: ['airbyte'], + transform: ['code-server'], + observe: ['dbt-docs', 'local-dbt-docs', 'datahub'], + orchestrate: ['airflow'], + analyze: ['superset'], +}; + +export const HasTabAccess = function (user: User, tab: string) { + return tab === 'docs' + ? HasWorkbenchAccess(user) + : HasWorkbenchServiceAccess(user, tabPermissions[tab]); +}; + +export const HasWorkbenchServiceAccess = function (user: User, codes: string[]) { + const envSlug = getSiteContext().env; + const project = user.projects.find((project) => + project.environments.find((environment) => environment.slug === envSlug) + ); + const env = project && project.environments.find((environment) => environment.slug === envSlug); + return ( + user && + env && + confirmUserPermissions(user, codes, envSlug, project.slug) && + codes.some((code) => env.services[code].enabled && env.services[code].valid) + ); +}; + +export const confirmUserPermissions = ( + user: User, + codes: string[], + envSlug: string | undefined, + projectSlug: string | undefined +) => + user.permissions.find((permission) => + codes.some( + (code) => + permission.includes(`${envSlug}|workbench:${code}`) || + permission.includes(`${projectSlug}|workbench:${code}`) + ) + ); + +export const HasWorkbenchAccess = function (user: User) { + const envSlug = getSiteContext().env; + const project = user.projects.find((project) => + project.environments.find((environment) => environment.slug === envSlug) + ); + const env = project && project.environments.find((environment) => environment.slug === envSlug); + return ( + user && + env && + user.permissions.find( + (permission) => + permission.includes(`${envSlug}|workbench:`) || + permission.includes(`${project.slug}|workbench:`) + ) + ); +}; + +export const GetEnvironmentNameAndSlug = function (user: User | undefined) { + const envSlug = getSiteContext().env; + const project = user?.projects.find((project) => + project.environments.find((environment) => environment.slug === envSlug) + ); + const env = project && project.environments.find((environment) => environment.slug === envSlug); + return { + currentEnvName: env?.name, + currentEnvSlug: env?.slug.toUpperCase(), + }; +}; diff --git a/src/core/workbench/app/src/context/UserContext/api/getUserInfo.ts b/src/core/workbench/app/src/context/UserContext/api/getUserInfo.ts new file mode 100644 index 00000000..c383823b --- /dev/null +++ b/src/core/workbench/app/src/context/UserContext/api/getUserInfo.ts @@ -0,0 +1,21 @@ +import { useQuery } from 'react-query'; + +import { axios } from '../../../lib/axios'; +import { User } from '../types'; + +type TGetUserInfo = { + environment?: string; + account?: string; +}; + +export const getUserInfo = (params: TGetUserInfo): Promise => { + return axios.get('api/iam/user-info', { params }); +}; + +export const useGetUserInfo = (params: TGetUserInfo, options?: any) => { + return useQuery( + ['getUserInfo', params.account, params.environment], + async () => await getUserInfo(params), + options + ); +}; diff --git a/src/core/workbench/app/src/context/UserContext/index.ts b/src/core/workbench/app/src/context/UserContext/index.ts new file mode 100644 index 00000000..2d32a419 --- /dev/null +++ b/src/core/workbench/app/src/context/UserContext/index.ts @@ -0,0 +1 @@ +export * from './UserContext'; diff --git a/src/core/workbench/app/src/context/UserContext/types/index.ts 
b/src/core/workbench/app/src/context/UserContext/types/index.ts new file mode 100644 index 00000000..9167a2ec --- /dev/null +++ b/src/core/workbench/app/src/context/UserContext/types/index.ts @@ -0,0 +1,268 @@ +import { ConnectionTemplate } from '../../../features/global/types'; +import { Account } from '../../AccountContext/types'; + +export type AirflowConfig = { + dags_folder: string; + yaml_dags_folder: string; + dags_source: string; + git_branch?: string; + s3_sync?: S3DagSyncConfig; + logs?: DagLogsConfig; + resources?: AirflowResources; + api_enabled?: boolean; +}; + +export interface AirflowResources { + scheduler: ResourceConfig; + triggerer: ResourceConfig; + webserver: ResourceConfig; + workers: ResourceConfig; + statsd: ResourceConfig; +} + +export interface ResourceConfig { + requests: MemoryConfig; + limits: MemoryConfig; +} + +export interface MemoryConfig { + cpu: string; + memory: string; +} + +export type S3DagSyncConfig = { + path: string; + access_key?: string; + secret_key?: string; + iam_role?: string; +}; + +export type DagLogsConfig = { + external: boolean; + backend?: string; + s3_log_bucket?: string; + access_key?: string; + secret_key?: string; + volume_handle?: string; +}; + +export type DbtDocsConfig = { + git_branch: string; +}; + +export interface CodeServerConfig { + resources: ResourceConfig; +} + +export type EnvironmentIntegration = { + id?: string; + integration: string; + type: string; + service: string; + is_notification?: boolean; +}; + +export type Precondition = { + code: string; + message: string; +}; + +export type ServiceStatus = { + enabled: boolean; + valid: boolean; + unmet_preconditions?: Precondition[]; +}; + +export type Environment = { + id: string; + name: string; + services: Record; + type: string; + slug: string; + created_at: string; + project: string; + service_credentials_count: number; + dbt_home_path: string; + dbt_profiles_dir: string; + airflow_config: AirflowConfig; + dbt_docs_config: DbtDocsConfig; + code_server_config: CodeServerConfig; + integrations: EnvironmentIntegration[]; + variables: EnvironmentVariables; + release_profile: string; + settings: any; +}; + +export type EnvironmentKeys = { + id: string; + slug: string; + error?: string; + airflow_api_url?: string; + tokens?: string[]; + new_token?: string; +}; + +export type Repository = { + git_url: string; + url: string; + provider: string; +}; + +export type DeployCredentials = { + git_username: string; + git_password?: string; + azure_tenant?: string; +}; + +export type Project = { + clone_strategy: string; + id: string; + name: string; + release_branch: string; + slug: string; + settings: any; + ci_home_url: string; + ci_provider: string; + deploy_credentials: DeployCredentials; + deploy_key: string; + azure_deploy_key: string; + public_ssh_key?: string; + public_azure_key?: string; + repository: Repository; + environments: Environment[]; + connection_templates: ConnectionTemplate[]; + validated_at: string; + variables: EnvironmentVariables; + release_branch_protected: boolean; + secrets_backend: string; + secrets_backend_config: SecretsBackendConfig; + secrets_secondary_backend: string; + secrets_secondary_backend_config: any; +}; + +export type ProjectKeys = { + id: string; + slug: string; + error?: string; + tokens?: string[]; + new_token?: string; + dbt_api_url?: string; +}; + +export type Features = { + user_profile_delete_account: boolean; + user_profile_change_name: boolean; + user_profile_change_ssh_keys: boolean; + user_profile_change_ssl_keys: boolean; + 
user_profile_change_credentials: boolean; + accounts_signup: boolean; + admin_account: boolean; + admin_groups: boolean; + admin_create_groups: boolean; + admin_invitations: boolean; + admin_users: boolean; + admin_projects: boolean; + admin_environments: boolean; + admin_billing: boolean; + admin_connections: boolean; + admin_service_credentials: boolean; + admin_integrations: boolean; + admin_secrets: boolean; + admin_profiles: boolean; + admin_code_server_environment_variables: boolean; + admin_env_code_server_mem_and_cpu_resources: boolean; + admin_env_airflow_mem_and_cpu_resources: boolean; + stop_codeserver_on_inactivity: boolean; + shareable_codeserver: boolean; + codeserver_exposures: boolean; + codeserver_restart: boolean; + observability_stack: boolean; + select_minio_logs: boolean; + show_get_started_banner: boolean; + local_airflow: boolean; +}; + +export type UserEnvironment = { + id: string; + code_server_access: string; + services: any; + share_links: Record; + variables: EnvironmentVariables; + env_slug: string; + env_name: string; + project_name: string; +}; + +export type User = { + name: string; + email: string; + email_username: string; + slug: string; + avatar: string; + permissions: string[]; + projects: Project[]; + trial_accounts: number; + features: Features; + user_environments: UserEnvironment[]; + env_account?: string; + release: string; + customer_portal?: string; + has_license: boolean; + has_dynamic_blob_storage_provisioning: boolean; + has_dynamic_network_filesystem_provisioning: boolean; + setup_enabled: boolean | null; +}; + +export type Template = { + id: string; + name: string; + description: string; + context_type: string; + format: string; +}; + +export interface IUserContext { + currentUser: User | undefined; + setCurrentUser: (user: User | undefined) => void; +} + +export type ProfileFile = { + mount_path: string; + template: Template; + override_existent: boolean; + execute: boolean; +}; + +export type Profile = { + id: string; + name: string; + slug: string; + account: Account; + dbt_sync: boolean; + dbt_local_docs: boolean; + mount_ssl_keys: boolean; + mount_ssh_keys: boolean; + mount_api_token: boolean; + clone_repository: boolean; + files_from?: Profile; + files: ProfileFile[]; + profile_files_count: number; + is_system_profile: boolean; +}; + +export type EnvironmentVariableValue = { + value: string; + delete: boolean; +}; + +export type EnvironmentVariablesDeletable = { + [key: string]: EnvironmentVariableValue; +}; + +export type EnvironmentVariables = { + [key: string]: string; +}; + +export type SecretsBackendConfig = { + [key: string]: string; +}; diff --git a/src/core/workbench/app/src/context/index.tsx b/src/core/workbench/app/src/context/index.tsx new file mode 100644 index 00000000..7e8ee516 --- /dev/null +++ b/src/core/workbench/app/src/context/index.tsx @@ -0,0 +1,34 @@ +import React from 'react'; +import { ErrorBoundary } from 'react-error-boundary'; +import { QueryClientProvider } from 'react-query'; +import { ReactQueryDevtools } from 'react-query/devtools'; + +import { ErrorFallback } from '../components/Error'; +import { queryClient } from '../lib/react-query'; + +import { UIProvider } from './UIProvider'; + +type AppProviderProps = { + children: React.ReactNode; +}; + +declare global { + interface Window { + gtag: any; + } +} + +export const AppProvider = ({ children }: AppProviderProps) => { + return ( +
} + > + + {process.env.NODE_ENV !== 'test' && } + + {children} + + + + ); +}; diff --git a/src/core/workbench/app/src/features/admin/account/api/deleteAccount.ts b/src/core/workbench/app/src/features/admin/account/api/deleteAccount.ts new file mode 100644 index 00000000..d0eb1ce3 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/account/api/deleteAccount.ts @@ -0,0 +1,51 @@ +import { useToast } from '@chakra-ui/react'; +import { createElement } from 'react'; +import { useMutation, useQueryClient } from 'react-query'; +import { useNavigate } from 'react-router-dom'; + +import ExpandibleToast from '../../../../components/ExpandibleToast/ExpandibleToast'; +import { axios } from '../../../../lib/axios'; + +export const deleteAccount = (account: string) => { + return axios.delete(`/api/admin/${account}/settings`); +}; + +export const useDeleteAccount = () => { + const queryClient = useQueryClient(); + const navigate = useNavigate(); + const toast = useToast(); + return useMutation( + 'deleteAccount', + async ({ account }: { account: string }) => { + return await deleteAccount(account); + }, + { + onSuccess: () => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Account deleted successfully', + status: 'success', + }); + }, + isClosable: true, + }); + queryClient.invalidateQueries('getUserAccounts'); + queryClient.invalidateQueries('getUserInfo'); + navigate('/launchpad'); + }, + onError: (error: any) => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Error deleting account', + extra: JSON.stringify(error.response.data), + status: 'error', + }); + }, + isClosable: true, + }); + }, + } + ); +}; diff --git a/src/core/workbench/app/src/features/admin/account/api/getAccountDetail.ts b/src/core/workbench/app/src/features/admin/account/api/getAccountDetail.ts new file mode 100644 index 00000000..96755d98 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/account/api/getAccountDetail.ts @@ -0,0 +1,12 @@ +import { useQuery } from 'react-query'; + +import { Account } from '../../../../context/AccountContext/types'; +import { axios } from '../../../../lib/axios'; + +export const getAccountDetail = (account?: string): Promise => { + return axios.get(`api/admin/${account}/settings`); +}; + +export const useAccountDetail = (account?: string, options?: any) => { + return useQuery('accountDetail', async () => await getAccountDetail(account), options); +}; diff --git a/src/core/workbench/app/src/features/admin/account/api/updateAccountDetail.ts b/src/core/workbench/app/src/features/admin/account/api/updateAccountDetail.ts new file mode 100644 index 00000000..d404d2dd --- /dev/null +++ b/src/core/workbench/app/src/features/admin/account/api/updateAccountDetail.ts @@ -0,0 +1,51 @@ +import { useToast } from '@chakra-ui/react'; +import { createElement } from 'react'; +import { useMutation, useQueryClient } from 'react-query'; +import { useNavigate } from 'react-router-dom'; + +import ExpandibleToast from '../../../../components/ExpandibleToast/ExpandibleToast'; +import { axios } from '../../../../lib/axios'; + +export const updateAccountDetail = (account: string, data: any) => { + return axios.put(`api/admin/${account}/settings`, data); +}; + +export const useUpdateAccountDetail = () => { + const queryClient = useQueryClient(); + const navigate = useNavigate(); + const toast = useToast(); + + return useMutation( + 'updateAccountDetail', + async ({ account, body }: { account: string; body: any }) => { + return await 
updateAccountDetail(account, body); + }, + { + onSuccess: () => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Account updated successfully', + status: 'success', + }); + }, + isClosable: true, + }); + queryClient.invalidateQueries('getUserAccounts'); + navigate('/launchpad'); + }, + onError: (error: any) => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Error updating account', + extra: JSON.stringify(error.response.data), + status: 'error', + }); + }, + isClosable: true, + }); + }, + } + ); +}; diff --git a/src/core/workbench/app/src/features/admin/account/components/AccountSettings.tsx b/src/core/workbench/app/src/features/admin/account/components/AccountSettings.tsx new file mode 100644 index 00000000..f01c688b --- /dev/null +++ b/src/core/workbench/app/src/features/admin/account/components/AccountSettings.tsx @@ -0,0 +1,75 @@ +import { Box, Button, Stack, StackDivider, Text, StackProps, VStack } from '@chakra-ui/react'; +import { Formik, Form } from 'formik'; +import { InputControl } from 'formik-chakra-ui'; +import React, { useContext } from 'react'; +import * as Yup from 'yup'; + +import { Card } from '../../../../components/Card'; +import { HeadingGroup } from '../../../../components/ProfileForm/HeadingGroup'; +import { AccountContext } from '../../../../context/AccountContext'; +import { useUpdateAccountDetail } from '../api/updateAccountDetail'; + +export const AccountSettings = (props: StackProps) => { + const { currentAccount } = useContext(AccountContext); + + const initialValues = { + name: currentAccount ? currentAccount.name : '', + owned_by: currentAccount ? currentAccount.owned_by : '', + }; + const updateMutation = useUpdateAccountDetail(); + + const handleSubmit = (val: any, { setSubmitting }: any) => { + const body = { + name: val.name, + }; + if (currentAccount?.slug !== undefined) { + updateMutation.mutate({ account: currentAccount.slug, body }); + } + setSubmitting(false); + }; + + return ( + + + + } spacing="6"> + +
+ + + + + Account related notifications are sent to owner's email address. + + + + + + + +
+
+
+
+
+ ); +}; diff --git a/src/core/workbench/app/src/features/admin/account/components/AccountSubscription.tsx b/src/core/workbench/app/src/features/admin/account/components/AccountSubscription.tsx new file mode 100644 index 00000000..fb65a696 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/account/components/AccountSubscription.tsx @@ -0,0 +1,140 @@ +import { CheckIcon } from '@chakra-ui/icons'; +import { + Box, + Button, + Flex, + Radio, + Stack, + StackDivider, + Tag, + TagLabel, + TagLeftIcon, + VStack, + Text, + HStack, +} from '@chakra-ui/react'; +import { Formik, Form } from 'formik'; +import { RadioGroupControl, SelectControl } from 'formik-chakra-ui'; +import React, { useContext, useState } from 'react'; + +import { Card } from '../../../../components/Card'; +import { HeadingGroup } from '../../../../components/ProfileForm/HeadingGroup'; +import { AccountContext } from '../../../../context/AccountContext'; +import { UserContext } from '../../../../context/UserContext'; +import { useAccountSubscribe } from '../../../global/api/accountSubscribe'; + +export const AccountSubscription = () => { + const { currentUser } = useContext(UserContext); + const { currentAccount } = useContext(AccountContext); + const initialValues = { + plan: currentAccount && currentAccount.plan ? currentAccount.plan.kind : 'starter', + billing_period: + currentAccount && currentAccount.plan ? currentAccount.plan.billing_period : 'monthly', + }; + const [currentPlan, setCurrentPlan] = useState(initialValues.plan); + + const subscribeAccountMutation = useAccountSubscribe(); + + const isOnTrial = + currentAccount && currentAccount.plan && currentAccount.remaining_trial_days >= 0; + + const hasStripeSubscription = + currentAccount && + currentAccount.subscription_id !== null && + currentAccount.subscription_id !== undefined; + + const handleSubmit = (body: any) => { + if (hasStripeSubscription) { + alert('Changing an active subscription is not implemented yet'); + } else { + body.account_slug = currentAccount?.slug; + subscribeAccountMutation.mutateAsync({ body: body }).then((data: any) => { + window.location.href = data.checkout_session_url; + }); + } + }; + + const onPlanChange = (event: any) => { + setCurrentPlan(event.target.value); + }; + + const handleManageSubscription = () => { + if (currentUser?.customer_portal) { + window.location.href = currentUser.customer_portal; + } + }; + + return ( + + + + } spacing="6"> + +
+ {isOnTrial && ( + + + + + on Free Trial - {`${currentAccount.remaining_trial_days} days left`} + + + + )} + + + + Starter Plan + Growth Plan + + + + + + + + + {!hasStripeSubscription && ( + + + {!currentUser?.features.admin_billing && ( + Subscribe feature is temporarily disabled. + )} + + )} + {hasStripeSubscription && currentUser?.customer_portal && ( + + + + Manage your current subscription, invoices, and payments. + + + )} + +
+
+
+
+
+ ); +}; diff --git a/src/core/workbench/app/src/features/admin/account/components/DangerZone.tsx b/src/core/workbench/app/src/features/admin/account/components/DangerZone.tsx new file mode 100644 index 00000000..eb454594 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/account/components/DangerZone.tsx @@ -0,0 +1,51 @@ +import { Button, Stack, StackProps, Text } from '@chakra-ui/react'; +import React, { useContext } from 'react'; + +import { AlertDialog } from '../../../../components/AlertDialog'; +import { Card } from '../../../../components/Card'; +import { HeadingGroup } from '../../../../components/ProfileForm/HeadingGroup'; +import { AccountContext } from '../../../../context/AccountContext'; +import { useDeleteAccount } from '../api/deleteAccount'; + +export const DangerZone = (props: StackProps) => { + const [isOpen, setIsOpen] = React.useState(false); + const onClose = () => setIsOpen(false); + const { currentAccount } = useContext(AccountContext); + + const deleteMutation = useDeleteAccount(); + const handleConfirmDelete = () => { + if (currentAccount) { + deleteMutation.mutate({ + account: currentAccount.slug, + }); + } + }; + + return ( + + + + Delete my account and data + + Once you delete your account, there is no going back. Please be certain. + + + handleConfirmDelete()} + /> + + + ); +}; diff --git a/src/core/workbench/app/src/features/admin/account/index.ts b/src/core/workbench/app/src/features/admin/account/index.ts new file mode 100644 index 00000000..a3820983 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/account/index.ts @@ -0,0 +1 @@ +export * from './routes'; diff --git a/src/core/workbench/app/src/features/admin/account/routes/Account.tsx b/src/core/workbench/app/src/features/admin/account/routes/Account.tsx new file mode 100644 index 00000000..9d69becb --- /dev/null +++ b/src/core/workbench/app/src/features/admin/account/routes/Account.tsx @@ -0,0 +1,40 @@ +import { Box, Flex, Stack, useColorModeValue } from '@chakra-ui/react'; +import { useContext } from 'react'; + +import { Breadcrumb } from '../../../../components/Breadcrumb'; +import { Header } from '../../../../components/Header'; +import PageSidebarContainer from '../../../../components/Sidebar/PageSidebarContainer'; +import { AccountContext } from '../../../../context/AccountContext'; +import { UserContext } from '../../../../context/UserContext'; +import { AccountSettings } from '../components/AccountSettings'; +import { AccountSubscription } from '../components/AccountSubscription'; +import { DangerZone } from '../components/DangerZone'; + +export const Account = () => { + const { currentUser } = useContext(UserContext); + const { currentAccount } = useContext(AccountContext); + + return ( + +
+ + + + + + + {currentUser?.features.accounts_signup && + currentAccount?.owned_by === currentUser.email && } + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/features/admin/account/routes/index.tsx b/src/core/workbench/app/src/features/admin/account/routes/index.tsx new file mode 100644 index 00000000..8d84d152 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/account/routes/index.tsx @@ -0,0 +1,11 @@ +import { Route, Routes } from 'react-router-dom'; + +import { Account } from './Account'; + +export const AccountRoutes = () => { + return ( + + } /> + + ); +}; diff --git a/src/core/workbench/app/src/features/admin/billing/api/getTotalUsers.ts b/src/core/workbench/app/src/features/admin/billing/api/getTotalUsers.ts new file mode 100644 index 00000000..10f0afd5 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/billing/api/getTotalUsers.ts @@ -0,0 +1,6 @@ +import { axios } from '../../../../lib/axios'; +import { User } from '../../users/types'; + +export const getTotalUsers = (account: string): Promise => { + return axios.get(`api/admin/${account}/users`); +}; diff --git a/src/core/workbench/app/src/features/admin/billing/components/PricingCard.tsx b/src/core/workbench/app/src/features/admin/billing/components/PricingCard.tsx new file mode 100644 index 00000000..af01fb01 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/billing/components/PricingCard.tsx @@ -0,0 +1,173 @@ +import { + Box, + BoxProps, + Button, + createIcon, + Divider, + Heading, + List, + ListItem, + ListItemProps, + Text, + Tag, + useColorModeValue as mode, +} from '@chakra-ui/react'; +import React from 'react'; + +const CheckIcon = createIcon({ + viewBox: '0 0 17 12', + d: 'M0 5.82857L1.64571 4.11429L5.48571 7.2L14.8114 0L16.4571 1.71429L5.48571 12L0 5.82857Z', +}); + +const PricingDetail = (props: ListItemProps & { iconColor: string }) => { + const { children, iconColor, ...rest } = props; + return ( + + + + {children} + + + ); +}; + +interface PricingCardProps extends BoxProps { + features: string[]; + name: string; + duration: string; + extras: string; + description: string; + price: string; + trialDays: number; + totalUsers?: number; + subscribed: boolean; + enterprise: boolean; + onClick?: () => void; + colorScheme: string; +} + +export const PricingCard = (props: PricingCardProps) => { + const { + features, + name, + description, + duration, + price, + extras, + trialDays, + totalUsers, + subscribed, + enterprise, + onClick, + colorScheme: c, + ...rest + } = props; + + return ( + + {name} + + + + {description} + + + + {enterprise ? ( + + {price} + + ) : ( + + {price} + + )} + + {duration} + + + {extras} + + + {subscribed ? ( + <> + + Developer seats used: {totalUsers} + + + + ) : ( + <> + {enterprise ? ( + <> + + {trialDays} + + + + ) : ( + <> + + Your free trial expires in {trialDays} days. + +
+ + +
+ + )} + + )} + + + + What you get: + + + {features.map((feature, index) => ( + + {feature} + + ))} + + +
+ ); +}; diff --git a/src/core/workbench/app/src/features/admin/billing/components/index.ts b/src/core/workbench/app/src/features/admin/billing/components/index.ts new file mode 100644 index 00000000..0316ce43 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/billing/components/index.ts @@ -0,0 +1 @@ +export * from './PricingCard'; diff --git a/src/core/workbench/app/src/features/admin/billing/index.ts b/src/core/workbench/app/src/features/admin/billing/index.ts new file mode 100644 index 00000000..dd0c906a --- /dev/null +++ b/src/core/workbench/app/src/features/admin/billing/index.ts @@ -0,0 +1,2 @@ +export * from './components'; +export * from './routes'; diff --git a/src/core/workbench/app/src/features/admin/billing/routes/Cancel.tsx b/src/core/workbench/app/src/features/admin/billing/routes/Cancel.tsx new file mode 100644 index 00000000..44d2ef18 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/billing/routes/Cancel.tsx @@ -0,0 +1,58 @@ +import { Text } from '@chakra-ui/layout'; +import { Box, Button, Container, Flex, Heading, Stack, useColorModeValue } from '@chakra-ui/react'; +import { useContext } from 'react'; +import { useNavigate } from 'react-router-dom'; + +import { Header } from '../../../../components/Header'; +import PageSidebarContainer from '../../../../components/Sidebar/PageSidebarContainer'; +import { AccountContext } from '../../../../context/AccountContext'; +import { UserContext } from '../../../../context/UserContext'; + +export const Cancel = () => { + const { currentUser } = useContext(UserContext); + const { currentAccount } = useContext(AccountContext); + const navigate = useNavigate(); + const contactSales = () => { + window.location.href = 'mailto:sales@datacoves.com'; + navigate('/'); + }; + return ( + +
+ + + + + + + Subscription process cancelled + + You've just cancelled the subscription process. + + + Contact us if you have any question, or start it over again. + + + + {currentUser && currentUser.projects.length > 0 && ( + + )} + {currentUser?.projects.length === 0 && ( + + )} + + + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/features/admin/billing/routes/Checkout.tsx b/src/core/workbench/app/src/features/admin/billing/routes/Checkout.tsx new file mode 100644 index 00000000..23a98f27 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/billing/routes/Checkout.tsx @@ -0,0 +1,50 @@ +import { Text } from '@chakra-ui/layout'; +import { Box, Button, Container, Flex, Heading, Stack, useColorModeValue } from '@chakra-ui/react'; +import { useContext } from 'react'; +import { useNavigate } from 'react-router-dom'; + +import { Header } from '../../../../components/Header'; +import PageSidebarContainer from '../../../../components/Sidebar/PageSidebarContainer'; +import { AccountContext } from '../../../../context/AccountContext'; +import { UserContext } from '../../../../context/UserContext'; + +export const Checkout = () => { + const { currentUser } = useContext(UserContext); + const { currentAccount } = useContext(AccountContext); + const navigate = useNavigate(); + return ( + +
+ + + + + + + Subscription process completed + + You've just completed the subscription process. + + + {currentUser && currentUser.projects.length > 0 && ( + + )} + {currentUser?.projects.length === 0 && ( + + )} + + + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/features/admin/billing/routes/index.tsx b/src/core/workbench/app/src/features/admin/billing/routes/index.tsx new file mode 100644 index 00000000..dcfafbd6 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/billing/routes/index.tsx @@ -0,0 +1,13 @@ +import { Route, Routes } from 'react-router-dom'; + +import { Cancel } from './Cancel'; +import { Checkout } from './Checkout'; + +export const BillingRoutes = () => { + return ( + + } /> + } /> + + ); +}; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/api/createConnectionTemplate.ts b/src/core/workbench/app/src/features/admin/connection-templates/api/createConnectionTemplate.ts new file mode 100644 index 00000000..cbe186d7 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/api/createConnectionTemplate.ts @@ -0,0 +1,40 @@ +import { useToast } from '@chakra-ui/react'; +import { createElement } from 'react'; +import { useMutation } from 'react-query'; +import { useNavigate } from 'react-router-dom'; + +import ExpandibleToast from '../../../../components/ExpandibleToast/ExpandibleToast'; +import { axios } from '../../../../lib/axios'; + +export const createConnectionTemplate = (account: string, data: any) => { + return axios.post(`/api/admin/${account}/connectiontemplates`, data); +}; + +export const useCreateConnectionTemplate = () => { + const navigate = useNavigate(); + const toast = useToast(); + + return useMutation( + 'createConnectionTemplate', + async ({ account, body }: { account: string; body: any }) => { + return await createConnectionTemplate(account, body); + }, + { + onSuccess: () => { + navigate('/admin/connection-templates'); + }, + onError: (error: any) => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Error creating connection template', + extra: JSON.stringify(error.response.data), + status: 'error', + }); + }, + isClosable: true, + }); + }, + } + ); +}; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/api/deleteConnectionTemplate.ts b/src/core/workbench/app/src/features/admin/connection-templates/api/deleteConnectionTemplate.ts new file mode 100644 index 00000000..7dbecf58 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/api/deleteConnectionTemplate.ts @@ -0,0 +1,39 @@ +import { useToast } from '@chakra-ui/react'; +import { createElement } from 'react'; +import { useMutation, useQueryClient } from 'react-query'; + +import ExpandibleToast from '../../../../components/ExpandibleToast/ExpandibleToast'; +import { axios } from '../../../../lib/axios'; + +export const deleteConnectionTemplates = (account: string, id: string) => { + return axios.delete(`/api/admin/${account}/connectiontemplates/${id}`); +}; + +export const useDeleteConnectionTemplates = () => { + const queryClient = useQueryClient(); + const toast = useToast(); + + return useMutation( + 'deleteConnectionTemplates', + async ({ account, id }: { account: string; id: string }) => { + return await deleteConnectionTemplates(account, id); + }, + { + onSuccess: () => { + queryClient.invalidateQueries('connectionTemplates'); + }, + onError: (error: any) => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 
'Error deleting connection template', + extra: JSON.stringify(error.response.data), + status: 'error', + }); + }, + isClosable: true, + }); + }, + } + ); +}; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/api/getConnectionTemplateById.ts b/src/core/workbench/app/src/features/admin/connection-templates/api/getConnectionTemplateById.ts new file mode 100644 index 00000000..975e181b --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/api/getConnectionTemplateById.ts @@ -0,0 +1,19 @@ +import { useQuery } from 'react-query'; + +import { axios } from '../../../../lib/axios'; +import { ConnectionTemplate } from '../../../global/types'; + +export const getConnectionTemplateById = ( + account?: string, + id?: string +): Promise => { + return axios.get(`/api/admin/${account}/connectiontemplates/${id}`); +}; + +export const useConnectionTemplate = (account?: string, id?: string, options?: any) => { + return useQuery( + 'connectionTemplate', + async () => await getConnectionTemplateById(account, id), + options + ); +}; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/api/getConnectionTemplates.ts b/src/core/workbench/app/src/features/admin/connection-templates/api/getConnectionTemplates.ts new file mode 100644 index 00000000..30d1759b --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/api/getConnectionTemplates.ts @@ -0,0 +1,47 @@ +import { useQuery } from 'react-query'; + +import { axios } from '../../../../lib/axios'; +import { IAPIPaginatedResponse } from '../../../../types'; +import { ConnectionTemplate } from '../../../global/types'; + +interface IGetConnectionTemplates { + account?: string; + search?: string; + project?: string; + limit?: number; + offset?: number; +} + +interface IConnectionTemplatesResponse extends IAPIPaginatedResponse { + results: ConnectionTemplate[]; +} + +export const getConnectionTemplates = ({ + account, + search, + project, + limit, + offset, +}: IGetConnectionTemplates): Promise => { + const params = { + search: search ? search : undefined, + project: project ? project : undefined, + limit: limit ? limit : undefined, + offset: offset ? 
offset : undefined, + }; + return axios.get(`/api/admin/${account}/connectiontemplates`, { params: params }); +}; + +export const useConnectionTemplates = ({ + account, + search, + project, + limit, + offset, +}: IGetConnectionTemplates) => { + return useQuery({ + enabled: account !== undefined, + queryKey: ['connectionTemplates', account, search, project, limit, offset], + queryFn: () => getConnectionTemplates({ account, search, project, limit, offset }), + }); +}; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/api/updateConnectionTemplate.ts b/src/core/workbench/app/src/features/admin/connection-templates/api/updateConnectionTemplate.ts new file mode 100644 index 00000000..dc49fb84 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/api/updateConnectionTemplate.ts @@ -0,0 +1,40 @@ +import { useToast } from '@chakra-ui/react'; +import { createElement } from 'react'; +import { useMutation } from 'react-query'; +import { useNavigate } from 'react-router-dom'; + +import ExpandibleToast from '../../../../components/ExpandibleToast/ExpandibleToast'; +import { axios } from '../../../../lib/axios'; + +export const updateConnectionTemplate = (account: string, id: string, data: any) => { + return axios.put(`/api/admin/${account}/connectiontemplates/${id}`, data); +}; + +export const useUpdateConnectionTemplate = () => { + const navigate = useNavigate(); + const toast = useToast(); + + return useMutation( + 'updateConnectionTemplate', + async ({ account, id, body }: { account: string; id: string; body: any }) => { + return await updateConnectionTemplate(account, id, body); + }, + { + onSuccess: () => { + navigate('/admin/connection-templates'); + }, + onError: (error: any) => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Error updating connection template', + extra: JSON.stringify(error.response.data), + status: 'error', + }); + }, + isClosable: true, + }); + }, + } + ); +}; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/components/ConnectionTemplatesTable.tsx b/src/core/workbench/app/src/features/admin/connection-templates/components/ConnectionTemplatesTable.tsx new file mode 100644 index 00000000..93f67816 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/components/ConnectionTemplatesTable.tsx @@ -0,0 +1,146 @@ +import { EditIcon, DeleteIcon } from '@chakra-ui/icons'; +import { + Button, + Table, + Tbody, + Td, + Th, + Thead, + Tr, + Text, + useColorModeValue as mode, + Stack, + VStack, + Box, +} from '@chakra-ui/react'; +import React, { useContext, useState } from 'react'; +import { useNavigate } from 'react-router-dom'; + +import { AlertDialog } from '../../../../components/AlertDialog'; +import { AccountContext } from '../../../../context/AccountContext'; +import { Project } from '../../../../context/UserContext/types'; +import { ConnectionTypeCell } from '../../../global/components/ConnectionTypeCell'; +import { ConnectionTemplate } from '../../../global/types'; +import { useDeleteConnectionTemplates } from '../api/deleteConnectionTemplate'; + +type IConnectionColumn = { + header: string; + cell: (data: ConnectionTemplate) => JSX.Element; +}; + +export const ConnectionTemplatesTable = (props: any) => { + const [isConfirmOpen, setIsConfirmOpen] = useState(false); + const [selectedConnectionTemplate, setSelectedConnectionTemplate] = + useState(); + const { currentAccount } = useContext(AccountContext); + const onClose = () => 
setIsConfirmOpen(false); + const navigate = useNavigate(); + const deleteMutation = useDeleteConnectionTemplates(); + const handleDelete = (connection: ConnectionTemplate) => { + setSelectedConnectionTemplate(connection); + setIsConfirmOpen(true); + }; + const handleConfirmDelete = () => { + if (selectedConnectionTemplate && currentAccount) { + deleteMutation.mutate({ + account: currentAccount.slug, + id: selectedConnectionTemplate.id, + }); + } + }; + const columns: IConnectionColumn[] = [ + { + header: 'Project', + cell: function ProjectCell(data: ConnectionTemplate) { + const project = props.projects?.find((project: Project) => project.id === data.project); + return {project?.name}; + }, + }, + { + header: 'Name', + cell: function NameCell(data: ConnectionTemplate) { + return {data.name}; + }, + }, + { + header: 'Type', + cell: function NameCell(connection_template: ConnectionTemplate) { + return ; + }, + }, + { + header: 'Used by', + cell: function NameCell(conn: ConnectionTemplate) { + const services = conn.service_credentials_count === 1 ? 'service' : 'services'; + const users = conn.user_credentials_count === 1 ? 'user' : 'users'; + return ( + {`${conn.service_credentials_count} ${services}, ${conn.user_credentials_count} ${users}`} + ); + }, + }, + { + header: '', + cell: function ActionsCell(data: ConnectionTemplate) { + return ( + + + + + ); + }, + }, + ]; + return ( + + + + + {columns.map((column, index) => ( + + ))} + + + + {props.data?.map((row: any, index: number) => ( + + {columns.map((column, index) => ( + + ))} + + ))} + +
+ {column.header} + +
+ {column.cell?.(row)} +
+ + {selectedConnectionTemplate?.service_credentials_count === 0 && + selectedConnectionTemplate?.user_credentials_count === 0 ? ( + You cannot undo this action. + ) : ( + {`${selectedConnectionTemplate?.service_credentials_count} service connections and ${selectedConnectionTemplate?.user_credentials_count} user connections will be permanently deleted.`} + )} + Are you sure? + + } + confirmLabel="Delete" + onClose={onClose} + onConfirm={handleConfirmDelete} + /> +
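Worth calling out before moving on: deletion in the table above is a two-step flow, and the dialog copy adapts to the "Used by" counts (a template with zero service and zero user credentials gets the plain "You cannot undo this action" warning; anything else spells out how many connections will be dropped). A condensed sketch of the wiring, using only names that appear in the hunk above; treat it as illustrative, not as a replacement for the component:

// Condensed sketch of the confirm-then-delete flow in ConnectionTemplatesTable.
const [isConfirmOpen, setIsConfirmOpen] = useState(false);
const [selectedConnectionTemplate, setSelectedConnectionTemplate] = useState<ConnectionTemplate>();
const deleteMutation = useDeleteConnectionTemplates();

const handleDelete = (connection: ConnectionTemplate) => {
  setSelectedConnectionTemplate(connection); // remember which row is pending deletion
  setIsConfirmOpen(true);                    // open the AlertDialog
};

const handleConfirmDelete = () => {
  // invoked only from the dialog's confirm button
  if (selectedConnectionTemplate && currentAccount) {
    deleteMutation.mutate({
      account: currentAccount.slug,
      id: selectedConnectionTemplate.id,
    });
  }
};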
+ ); +}; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/components/ConnectionTemplatesTableActions.tsx b/src/core/workbench/app/src/features/admin/connection-templates/components/ConnectionTemplatesTableActions.tsx new file mode 100644 index 00000000..c035515a --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/components/ConnectionTemplatesTableActions.tsx @@ -0,0 +1,41 @@ +import { ButtonGroup, HStack, Stack, Select, Button } from '@chakra-ui/react'; +import React from 'react'; +import { RiAddFill } from 'react-icons/ri'; +import { useNavigate } from 'react-router-dom'; + +export const ConnectionTemplatesTableActions = (props: any) => { + const navigate = useNavigate(); + const handleChange = (event: any) => { + props.setSelectedProject(event.target.value); + }; + + return ( + + + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/components/index.ts b/src/core/workbench/app/src/features/admin/connection-templates/components/index.ts new file mode 100644 index 00000000..0e61706e --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/components/index.ts @@ -0,0 +1,2 @@ +export * from './ConnectionTemplatesTable'; +export * from './ConnectionTemplatesTableActions'; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/index.ts b/src/core/workbench/app/src/features/admin/connection-templates/index.ts new file mode 100644 index 00000000..dd0c906a --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/index.ts @@ -0,0 +1,2 @@ +export * from './components'; +export * from './routes'; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/routes/ConnectionTemplateFormPage.tsx b/src/core/workbench/app/src/features/admin/connection-templates/routes/ConnectionTemplateFormPage.tsx new file mode 100644 index 00000000..35c7a5f2 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/routes/ConnectionTemplateFormPage.tsx @@ -0,0 +1,409 @@ +import { ExternalLinkIcon, WarningIcon } from '@chakra-ui/icons'; +import { Box, Stack, StackDivider, Button, HStack, VStack, Text, Link } from '@chakra-ui/react'; +import { Formik, Form } from 'formik'; +import { SelectControl, InputControl, SwitchControl } from 'formik-chakra-ui'; +import React, { useContext, useState } from 'react'; +import { useParams, useNavigate } from 'react-router-dom'; +import * as Yup from 'yup'; + +import { BasePage } from '../../../../components/AdminLayout'; +import { FieldGroup } from '../../../../components/FieldGroup'; +import { LoadingWrapper } from '../../../../components/LoadingWrapper'; +import { AccountContext } from '../../../../context/AccountContext'; +import { Project, Template } from '../../../../context/UserContext/types'; +import { useConnectionTypes } from '../../../global/api/getAccountConnectionTypes'; +import { ConnectionTemplate, ConnectionType } from '../../../global/types'; +import { useAllProjects } from '../../projects/api/getProjects'; +import { useAllTemplates } from '../../templates/api/getTemplates'; +import { useCreateConnectionTemplate } from '../api/createConnectionTemplate'; +import { useConnectionTemplate } from '../api/getConnectionTemplateById'; +import { useUpdateConnectionTemplate } from '../api/updateConnectionTemplate'; + +export const ConnectionTemplateFormPage = () => { + const { id } = useParams(); + const isCreateMode = id === undefined; + const { currentAccount } = 
useContext(AccountContext); + const [currentType, setCurrentType] = useState(); + const [currentConnectionUser, setCurrentConnectionUser] = useState(); + const [currentForUsers, setCurrentForUsers] = useState(); + const { + data: connectionTypes, + isSuccess: connectionTypesSuccess, + isLoading: isLoadingConnectionTypes, + } = useConnectionTypes({ + account: currentAccount?.slug, + }); + + const initialValues = { + name: '', + project: '', + type: '', + connection_user: 'provided', + for_users: false, + }; + + const { + data: projects, + isSuccess: projectsSuccess, + isLoading: isLoadingProjects, + } = useAllProjects({ + account: currentAccount?.slug, + }); + + const { + data: templates, + isSuccess: templatesSuccess, + isLoading: isLoadingTemplates, + } = useAllTemplates({ + account: currentAccount?.slug, + contextType: 'user', + enabled_for: 'ConnectionTemplate', + }); + + const createMutation = useCreateConnectionTemplate(); + const updateMutation = useUpdateConnectionTemplate(); + const handleSubmit = (body: any, { resetForm }: any) => { + if (currentAccount?.slug !== undefined) { + if (body.connection_details) { + if (currentType?.slug === 'snowflake') { + body.connection_details = { + account: body.connection_details.account, + database: body.connection_details.database, + warehouse: body.connection_details.warehouse, + role: body.connection_details.role, + mfa_protected: body.connection_details.mfa_protected || false, + }; + } + if (currentType?.slug === 'redshift') { + body.connection_details = { + host: body.connection_details.host, + database: body.connection_details.database, + }; + } + if (currentType?.slug === 'bigquery') { + body.connection_details = { + dataset: body.connection_details.dataset, + }; + } + if (currentType?.slug === 'databricks') { + body.connection_details = { + host: body.connection_details.host, + schema: body.connection_details.schema, + http_path: body.connection_details.http_path, + }; + } + } + + if (isCreateMode) { + createMutation.mutate({ + account: currentAccount?.slug, + body, + }); + } else { + updateMutation.mutate({ + account: currentAccount?.slug, + id, + body, + }); + } + } + resetForm(); + }; + const handleSelectType = (event: any) => { + const connection = connectionTypes?.find( + (type) => parseInt(type.id) === parseInt(event.target.value) + ); + setCurrentType(connection); + }; + const handleSelectConnectionUser = (event: any) => { + setCurrentConnectionUser(event.target.value); + }; + const handleSelectForUsers = (event: any) => { + // This is triggered before value is actually changed + setCurrentForUsers(event.target.value !== 'true'); + }; + const navigate = useNavigate(); + + return ( + + + }> + currentForUsers, + then: Yup.string().required('Required'), + otherwise: Yup.string(), + }), + connection_user_template: Yup.string().when({ + is: () => currentForUsers && currentConnectionUser === 'template', + then: Yup.string().required('Required'), + otherwise: Yup.string().nullable(), + }), + connection_details: Yup.object().when({ + is: () => currentType?.slug === 'snowflake', + then: Yup.object().shape({ + account: Yup.string().matches( + /^(?!.*\.(?!privatelink$)).*$/, + 'Accounts must not contain a period. Try a dash instead.' 
+ ), + }), + otherwise: Yup.object().shape({}), + }), + })} + onSubmit={handleSubmit} + > + {function Render({ setFieldValue }) { + const { data, isSuccess, isLoading } = useConnectionTemplate( + currentAccount?.slug, + id, + { + enabled: + currentAccount?.slug !== undefined && + id !== undefined && + connectionTypesSuccess, + onSuccess: (data: ConnectionTemplate) => { + setFieldValue('name', data.name); + setFieldValue('project', data.project); + setFieldValue('type', data.type); + const type = connectionTypes?.find( + (type) => parseInt(type.id) === parseInt(data.type) + ); + setCurrentType(type); + setFieldValue('for_users', data.for_users); + setCurrentForUsers(data.for_users); + setFieldValue('connection_user', data.connection_user); + setCurrentConnectionUser(data.connection_user); + setFieldValue('connection_user_template', data.connection_user_template); + setFieldValue('connection_details.account', data.connection_details.account); + setFieldValue( + 'connection_details.warehouse', + data.connection_details.warehouse + ); + setFieldValue('connection_details.database', data.connection_details.database); + setFieldValue('connection_details.host', data.connection_details.host); + setFieldValue('connection_details.role', data.connection_details.role); + setFieldValue( + 'connection_details.http_path', + data.connection_details.http_path + ); + setFieldValue('connection_details.schema', data.connection_details.schema); + setFieldValue('connection_details.dataset', data.connection_details.dataset); + setFieldValue( + 'connection_details.mfa_protected', + data.connection_details.mfa_protected + ); + }, + } + ); + return ( + +
+ + + + + + This is the identifier displayed when you use it in your services and + user settings. + + + + {projects?.map((project: Project) => { + return ( + + ); + })} + + + {connectionTypes?.map((type: ConnectionType) => { + return ( + + ); + })} + + + + + + + + + {!currentForUsers && + !isCreateMode && + data?.for_users && + data?.user_credentials_count !== 0 && ( + + + {`This action will delete ${ + data?.user_credentials_count + } user ${ + data?.user_credentials_count === 1 + ? 'connection' + : 'connections' + }`} + + )} + + + When enabled, the connection template can be selected by users when they + configure DB connections. + + + {currentForUsers && currentType && currentType.slug === 'snowflake' && ( + <> + + + + Make sure to{' '} + + enable Snowflake MFA caching + {' '} + for a smoother experience + + + + + + + + + + + )} + {currentForUsers && + currentConnectionUser === 'template' && + currentType && + currentType.slug === 'snowflake' && ( + + {templates?.map((template: Template) => { + return ( + + ); + })} + + )} + + + {currentType && currentType.slug === 'snowflake' && ( + + + + + + + + + + + + + )} + {currentType && currentType.slug === 'redshift' && ( + + + + + + + )} + {currentType && currentType.slug === 'databricks' && ( + + + + + + + + + + + + )} + {currentType && currentType.slug === 'bigquery' && ( + + + + + + )} + + + + + + +
+
+ ); + }} +
+
+
+
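One detail of the validation schema above deserves a note: the Snowflake account pattern /^(?!.*\.(?!privatelink$)).*$/ uses a nested negative lookahead to reject any value containing a period unless that period only introduces a trailing privatelink suffix, which is what the "Try a dash instead" message is nudging users toward. A quick illustration (the account strings are made up):

const snowflakeAccount = /^(?!.*\.(?!privatelink$)).*$/;

snowflakeAccount.test('xy12345-us-east-1');             // true  – region separated with dashes, no period
snowflakeAccount.test('xy12345.privatelink');           // true  – a single trailing ".privatelink" is allowed
snowflakeAccount.test('xy12345.us-east-1');             // false – contains a period
snowflakeAccount.test('xy12345.us-east-1.privatelink'); // false – still has a period before the suffix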
+ ); +}; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/routes/ConnectionTemplates.tsx b/src/core/workbench/app/src/features/admin/connection-templates/routes/ConnectionTemplates.tsx new file mode 100644 index 00000000..5dcd86bf --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/routes/ConnectionTemplates.tsx @@ -0,0 +1,74 @@ +import { Box } from '@chakra-ui/layout'; +import React, { useContext, useState, useEffect } from 'react'; +import { useSearchParams } from 'react-router-dom'; + +import { BasePage } from '../../../../components/AdminLayout'; +import { TablePagination } from '../../../../components/AdminLayout/components/TablePagination'; +import { LoadingWrapper } from '../../../../components/LoadingWrapper'; +import { AccountContext } from '../../../../context/AccountContext'; +import { useAllProjects } from '../../projects/api/getProjects'; +import { useConnectionTemplates } from '../api/getConnectionTemplates'; +import { ConnectionTemplatesTable, ConnectionTemplatesTableActions } from '../components'; + +export const ConnectionTemplates = () => { + const [searchParams] = useSearchParams(); + const pId = searchParams.get('projectId'); + + const pageLimit = 10; + const [pageOffset, setPageOffset] = useState(0); + const [selectedProject, setSelectedProject] = useState(); + const { currentAccount } = useContext(AccountContext); + + useEffect(() => { + if (pId !== null) { + setSelectedProject(pId); + } + }, [pId]); + + const connectionsQuery = useConnectionTemplates({ + account: currentAccount?.slug, + project: selectedProject, + limit: pageLimit, + offset: pageOffset, + }); + + const { + data: projects, + isLoading: isLoadingProjects, + isSuccess: isProjectsSuccess, + } = useAllProjects({ + account: currentAccount?.slug, + }); + const prevPageHandler = () => { + setPageOffset((current) => current - pageLimit); + }; + const nextPageHandler = () => { + setPageOffset((current) => current + pageLimit); + }; + + return ( + + + + + + + + + + ); +}; diff --git a/src/core/workbench/app/src/features/admin/connection-templates/routes/index.tsx b/src/core/workbench/app/src/features/admin/connection-templates/routes/index.tsx new file mode 100644 index 00000000..cacfc7fb --- /dev/null +++ b/src/core/workbench/app/src/features/admin/connection-templates/routes/index.tsx @@ -0,0 +1,13 @@ +import { Route, Routes } from 'react-router-dom'; + +import { ConnectionTemplateFormPage } from './ConnectionTemplateFormPage'; +import { ConnectionTemplates } from './ConnectionTemplates'; +export const ConnectionTemplatesRoutes = () => { + return ( + + } /> + } /> + } /> + + ); +}; diff --git a/src/core/workbench/app/src/features/admin/environments/api/createEnvironment.ts b/src/core/workbench/app/src/features/admin/environments/api/createEnvironment.ts new file mode 100644 index 00000000..7b71efad --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/api/createEnvironment.ts @@ -0,0 +1,40 @@ +import { useToast } from '@chakra-ui/react'; +import { createElement } from 'react'; +import { useMutation } from 'react-query'; +import { useNavigate } from 'react-router-dom'; + +import ExpandibleToast from '../../../../components/ExpandibleToast/ExpandibleToast'; +import { axios } from '../../../../lib/axios'; + +export const createEnvironment = (account: string, data: any) => { + return axios.post(`/api/admin/${account}/environments`, data); +}; + +export const useCreateEnvironment = () => { + const navigate = useNavigate(); + const toast 
= useToast(); + + return useMutation( + 'createEnvironment', + async ({ account, body }: { account: string; body: any }) => { + return await createEnvironment(account, body); + }, + { + onSuccess: () => { + navigate('/admin/environments'); + }, + onError: (error: any) => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Error creating environment', + extra: JSON.stringify(error.response.data), + status: 'error', + }); + }, + isClosable: true, + }); + }, + } + ); +}; diff --git a/src/core/workbench/app/src/features/admin/environments/api/deleteEnvironment.ts b/src/core/workbench/app/src/features/admin/environments/api/deleteEnvironment.ts new file mode 100644 index 00000000..f6ddb583 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/api/deleteEnvironment.ts @@ -0,0 +1,40 @@ +import { useToast } from '@chakra-ui/react'; +import { createElement } from 'react'; +import { useMutation, useQueryClient } from 'react-query'; + +import ExpandibleToast from '../../../../components/ExpandibleToast/ExpandibleToast'; +import { axios } from '../../../../lib/axios'; + +export const deleteEnvironments = (account: string, id: string) => { + return axios.delete(`/api/admin/${account}/environments/${id}`); +}; + +export const useDeleteEnvironments = () => { + const queryClient = useQueryClient(); + const toast = useToast(); + + return useMutation( + 'deleteEnvironments', + async ({ account, id }: { account: string; id: string }) => { + return await deleteEnvironments(account, id); + }, + { + onSuccess: () => { + queryClient.invalidateQueries('environments'); + queryClient.invalidateQueries('getUserInfo'); + }, + onError: (error: any) => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Error deleting environment', + extra: JSON.stringify(error.response.data), + status: 'error', + }); + }, + isClosable: true, + }); + }, + } + ); +}; diff --git a/src/core/workbench/app/src/features/admin/environments/api/deleteEnvironmentKey.ts b/src/core/workbench/app/src/features/admin/environments/api/deleteEnvironmentKey.ts new file mode 100644 index 00000000..8880c9ba --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/api/deleteEnvironmentKey.ts @@ -0,0 +1,35 @@ +import { useToast } from '@chakra-ui/react'; +import { createElement } from 'react'; +import { useMutation } from 'react-query'; + +import ExpandibleToast from '../../../../components/ExpandibleToast/ExpandibleToast'; +import { axios } from '../../../../lib/axios'; + +export const deleteEnvironmentKey = (account: string, id: string, key: string) => { + return axios.delete(`/api/admin/${account}/environments/${id}/keys/${key}`); +}; + +export const useDeleteEnvironmentKey = () => { + const toast = useToast(); + + return useMutation( + 'deleteEnvironmentKey', + async ({ account, id, key }: { account: string; id: string; key: string }) => { + return await deleteEnvironmentKey(account, id, key); + }, + { + onError: (error: any) => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Error deleting environment key', + extra: JSON.stringify(error.response.data), + status: 'error', + }); + }, + isClosable: true, + }); + }, + } + ); +}; diff --git a/src/core/workbench/app/src/features/admin/environments/api/getAirflowDefault.ts b/src/core/workbench/app/src/features/admin/environments/api/getAirflowDefault.ts new file mode 100644 index 00000000..51f2a86e --- /dev/null +++ 
b/src/core/workbench/app/src/features/admin/environments/api/getAirflowDefault.ts @@ -0,0 +1,20 @@ +import { useQuery } from 'react-query'; + +import { AirflowConfig, CodeServerConfig } from '../../../../context/UserContext/types'; +import { axios } from '../../../../lib/axios'; + +interface AdaptersDefaultValues { + airbyte: object; + airflow: AirflowConfig; + 'dbt-docs': object; + supersetIcon: object; + 'code-server': CodeServerConfig; +} + +export const getAdaptersDefaultValues = (): Promise => { + return axios.get('api/admin/adapters/default-values'); +}; + +export const useAdaptersDefaultValues = (options?: any) => { + return useQuery('adapters', () => getAdaptersDefaultValues(), options); +}; diff --git a/src/core/workbench/app/src/features/admin/environments/api/getEnvironmentById.ts b/src/core/workbench/app/src/features/admin/environments/api/getEnvironmentById.ts new file mode 100644 index 00000000..ee1fbf98 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/api/getEnvironmentById.ts @@ -0,0 +1,12 @@ +import { useQuery } from 'react-query'; + +import { Environment } from '../../../../context/UserContext/types'; +import { axios } from '../../../../lib/axios'; + +export const getEnvironmentById = (account?: string, id?: string): Promise => { + return axios.get(`/api/admin/${account}/environments/${id}`); +}; + +export const useEnvironment = (account?: string, id?: string, options?: any) => { + return useQuery('environment', () => getEnvironmentById(account, id), options); +}; diff --git a/src/core/workbench/app/src/features/admin/environments/api/getEnvironmentKeys.ts b/src/core/workbench/app/src/features/admin/environments/api/getEnvironmentKeys.ts new file mode 100644 index 00000000..a8297eb6 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/api/getEnvironmentKeys.ts @@ -0,0 +1,12 @@ +import { useQuery } from 'react-query'; + +import { EnvironmentKeys } from '../../../../context/UserContext/types'; +import { axios } from '../../../../lib/axios'; + +export const getEnvironmentKeys = (account?: string, id?: string): Promise => { + return axios.get(`/api/admin/${account}/environments/${id}/keys`); +}; + +export const useEnvironmentKeys = (account?: string, id?: string, options?: any) => { + return useQuery('environmentKeys', () => getEnvironmentKeys(account, id), options); +}; diff --git a/src/core/workbench/app/src/features/admin/environments/api/getEnvironments.ts b/src/core/workbench/app/src/features/admin/environments/api/getEnvironments.ts new file mode 100644 index 00000000..a778763b --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/api/getEnvironments.ts @@ -0,0 +1,49 @@ +import { useQuery } from 'react-query'; + +import { Environment } from '../../../../context/UserContext/types'; +import { axios } from '../../../../lib/axios'; +import { IAPIPaginatedResponse } from '../../../../types'; + +interface IGetEnvironments { + account?: string; + search?: string; + project?: string; + limit?: number; + offset?: number; + options?: any; +} + +interface IEnvironmentsResponse extends IAPIPaginatedResponse { + results: Environment[]; +} + +export const getEnvironments = ({ + account: account, + ...rest +}: IGetEnvironments): Promise => { + return axios.get(`/api/admin/${account}/environments`, { params: rest }); +}; + +export const useEnvironments = ({ options: options, ...rest }: IGetEnvironments) => { + return useQuery(['environments', ...Object.values(rest)], () => getEnvironments(rest), { + enabled: 
rest.account !== undefined, + ...options, + }); +}; + +export const getAllEnvironments = ({ account }: { account?: string }): Promise => { + // Returns all environments (without pagination) + return axios.get(`/api/admin/${account}/environments`); +}; + +interface UseAllProjectsOptions { + account?: string; +} + +export const useAllEnvironments = ({ account }: UseAllProjectsOptions) => { + return useQuery({ + enabled: account !== undefined, + queryKey: ['environments', account], + queryFn: () => getAllEnvironments({ account }), + }); +}; diff --git a/src/core/workbench/app/src/features/admin/environments/api/updateEnvironment.ts b/src/core/workbench/app/src/features/admin/environments/api/updateEnvironment.ts new file mode 100644 index 00000000..0011a36b --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/api/updateEnvironment.ts @@ -0,0 +1,42 @@ +import { useToast } from '@chakra-ui/react'; +import { createElement } from 'react'; +import { useMutation, useQueryClient } from 'react-query'; +import { useNavigate } from 'react-router-dom'; + +import ExpandibleToast from '../../../../components/ExpandibleToast/ExpandibleToast'; +import { axios } from '../../../../lib/axios'; + +export const updateEnvironment = (account: string, id: string, data: any) => { + return axios.put(`/api/admin/${account}/environments/${id}`, data); +}; + +export const useUpdateEnvironment = () => { + const navigate = useNavigate(); + const queryClient = useQueryClient(); + const toast = useToast(); + + return useMutation( + 'updateEnvironment', + async ({ account, id, body }: { account: string; id: string; body: any }) => { + return await updateEnvironment(account, id, body); + }, + { + onSuccess: () => { + queryClient.invalidateQueries('getUserInfo'); + navigate('/admin/environments'); + }, + onError: (error: any) => { + toast({ + render: () => { + return createElement(ExpandibleToast, { + message: 'Error updating environment', + extra: JSON.stringify(error.response.data), + status: 'error', + }); + }, + isClosable: true, + }); + }, + } + ); +}; diff --git a/src/core/workbench/app/src/features/admin/environments/components/AirflowConfigForm.tsx b/src/core/workbench/app/src/features/admin/environments/components/AirflowConfigForm.tsx new file mode 100644 index 00000000..1286d55b --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/components/AirflowConfigForm.tsx @@ -0,0 +1,272 @@ +import { Box, Text, HStack, VStack, Heading, Divider } from '@chakra-ui/react'; +import { FormikErrors } from 'formik'; +import { SelectControl, InputControl } from 'formik-chakra-ui'; +import React, { useEffect } from 'react'; + +import { User } from '../../../../context/UserContext/types'; + +interface Props { + currentUser: User | undefined; + dagsSource: string | undefined; + dagSyncAuth: string | undefined; + handleSelectDagsSource: (event: any) => void; + handleSelectDagSyncAuth: (event: any) => void; + airflowLogsBackend: string | undefined; + handleSelectLogsBackend: (event: any) => void; + airflowLogsExternal: boolean | undefined; + tabIndex: number; + validateForm: (values?: any) => Promise>; + handleChange: { + (e: React.ChangeEvent): void; + >(field: T): T extends React.ChangeEvent + ? 
void + : (e: string | React.ChangeEvent) => void; + }; +} + +function AirflowConfigForm(props: Props) { + const { + currentUser, + dagsSource, + handleSelectDagsSource, + dagSyncAuth, + handleSelectDagSyncAuth, + airflowLogsBackend, + handleSelectLogsBackend, + airflowLogsExternal, + validateForm, + tabIndex, + handleChange, + } = props; + + useEffect(() => { + tabIndex === 3 && validateForm(); + }, [tabIndex, validateForm, airflowLogsBackend, airflowLogsExternal, dagSyncAuth, dagsSource]); + + const isLocal = window.location.hostname === 'datacoveslocal.com'; + + return ( + <> + + + + + Airflow + Airflow specific settings + + + + + + Relative path to the folder where Python DAGs are located. + + + + + + Relative path to the folder where YAML DAGs are located. + + + + + + + + Relative path to a folder where the profiles.yml file is located. + + + + + + DAGs sync configuration + Where Airflow DAGs will be stored + + + { + handleSelectDagsSource(e); + handleChange(e); + validateForm(); + }} + isRequired + > + + + + + {dagsSource === 's3' && ( + + + + + + { + handleSelectDagSyncAuth(e); + handleChange(e); + validateForm(); + }} + isRequired + > + + + + + {dagSyncAuth === 'iam-user' && ( + + + + + + + + + )} + {dagSyncAuth === 'iam-role' && ( + + + + )} + + )} + {dagsSource === 'git' && ( + + + + Git branch to monitor for changes + + + )} + {(currentUser?.has_dynamic_blob_storage_provisioning || + currentUser?.has_dynamic_network_filesystem_provisioning || + isLocal) && ( + <> + + Logs configuration + Where Airflow logs will be stored + + + { + handleSelectLogsBackend(e); + handleChange(e); + validateForm(); + }} + > + + {!isLocal && + (currentUser?.has_dynamic_network_filesystem_provisioning ? ( + + ) : ( + + ))} + {!!currentUser?.features.select_minio_logs && isLocal && ( + + )} + {isLocal && } + + + + {airflowLogsExternal && ( + <> + {airflowLogsBackend === 's3' ? 
( + <> + + + + + + + + + + + + + + + + + ) : ( + airflowLogsBackend === 'efs' && ( + <> + + + + + ) + )} + + )} + + )} + + + ); +} + +export default AirflowConfigForm; diff --git a/src/core/workbench/app/src/features/admin/environments/components/AirflowResourcesForm.tsx b/src/core/workbench/app/src/features/admin/environments/components/AirflowResourcesForm.tsx new file mode 100644 index 00000000..c7d8b914 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/components/AirflowResourcesForm.tsx @@ -0,0 +1,81 @@ +import { Box, Text, HStack, VStack, Heading, Divider } from '@chakra-ui/react'; +import { FormikErrors } from 'formik'; +import { capitalize } from 'lodash-es'; +import React, { Fragment, useEffect } from 'react'; + +import { MemoryInput } from './MemoryInput'; +import { SERVICES } from './utils'; + +interface Props { + tabIndex: number; + validateForm: (values?: any) => Promise>; + values: any; + setFieldValue: (field: string, value: any, shouldValidate?: boolean | undefined) => void; +} + +function AirflowResourcesForm(props: Props) { + const { validateForm, tabIndex, values, setFieldValue } = props; + + useEffect(() => { + tabIndex === 4 && validateForm(); + }, [tabIndex, validateForm]); + + return ( + + + + + Airflow resources + + Specify a memory request and a memory limit for different services + + + + + {SERVICES.map((service) => ( + + + + {capitalize(service)} configuration + + + + + + + + + + + + + ))} + + ); +} + +export default AirflowResourcesForm; diff --git a/src/core/workbench/app/src/features/admin/environments/components/BasicInfoForm.tsx b/src/core/workbench/app/src/features/admin/environments/components/BasicInfoForm.tsx new file mode 100644 index 00000000..6d15d30d --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/components/BasicInfoForm.tsx @@ -0,0 +1,80 @@ +import { HStack, VStack, Text } from '@chakra-ui/react'; +import { FormikErrors } from 'formik'; +import { InputControl, SelectControl } from 'formik-chakra-ui'; +import { useEffect } from 'react'; + +import { Project } from '../../../../context/UserContext/types'; + +interface Props { + handleSelectType: (event: any) => void; + handleProjectChange: (event: any) => void; + projects: Project[] | undefined; + validateForm: (values?: any) => Promise>; + tabIndex: number; +} + +const BasicInfoForm = ({ + handleSelectType, + handleProjectChange, + projects, + validateForm, + tabIndex, +}: Props) => { + useEffect(() => { + tabIndex === 0 && validateForm(); + }, [tabIndex, validateForm]); + return ( + + + + + + + + + + + + {projects?.map((project: Project) => { + return ( + + ); + })} + + + + + + + + + + Determines the libraries and extensions pre-installed on VSCode and Airflow workers. 
+ + + + + ); +}; + +export default BasicInfoForm; diff --git a/src/core/workbench/app/src/features/admin/environments/components/CodeServerEnvForm.tsx b/src/core/workbench/app/src/features/admin/environments/components/CodeServerEnvForm.tsx new file mode 100644 index 00000000..5dc2db9c --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/components/CodeServerEnvForm.tsx @@ -0,0 +1,33 @@ +import { VStack, HStack } from '@chakra-ui/layout'; +import { Dispatch } from 'react'; + +import { EnvironmentVariablesDeletable } from '../../../../context/UserContext/types'; + +import { EnvironmentVariableModal } from './EnvironmentVariableModal'; +import { EnvironmentVariablesTable } from './EnvironmentVariablesTable'; + +interface Props { + setEnvironmentVariables: Dispatch>; + environmentVariables: EnvironmentVariablesDeletable; +} + +const CodeServerEnvForm = ({ setEnvironmentVariables, environmentVariables }: Props) => { + return ( + + + + + + + + + ); +}; + +export default CodeServerEnvForm; diff --git a/src/core/workbench/app/src/features/admin/environments/components/CodeServerResourcesForm.tsx b/src/core/workbench/app/src/features/admin/environments/components/CodeServerResourcesForm.tsx new file mode 100644 index 00000000..afc52251 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/components/CodeServerResourcesForm.tsx @@ -0,0 +1,61 @@ +import { Box, Text, HStack, VStack, Heading } from '@chakra-ui/react'; + +import { MemoryInput } from './MemoryInput'; + +interface Props { + values: any; + setFieldValue: (field: string, value: any, shouldValidate?: boolean | undefined) => void; +} + +function CodeServerResourcesForm(props: Props) { + const { values, setFieldValue } = props; + + return ( + + + + + Code Server resources + Specify a memory request and a memory limit for code-server + + + + + + + + + + + + + + + ); +} + +export default CodeServerResourcesForm; diff --git a/src/core/workbench/app/src/features/admin/environments/components/DocsForm.tsx b/src/core/workbench/app/src/features/admin/environments/components/DocsForm.tsx new file mode 100644 index 00000000..caa1fd9c --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/components/DocsForm.tsx @@ -0,0 +1,32 @@ +import { VStack, Box, Heading, Text } from '@chakra-ui/layout'; +import { FormikErrors } from 'formik'; +import { InputControl } from 'formik-chakra-ui'; +import { useEffect } from 'react'; + +interface Props { + validateForm: (values?: any) => Promise>; + tabIndex: number; +} + +const DocsForm = ({ validateForm, tabIndex }: Props) => { + useEffect(() => { + (tabIndex === 5 || tabIndex === 3) && validateForm(); + }, [tabIndex, validateForm]); + + return ( + + + Docs + dbt docs specific settings + + + + + Git branch where dbt docs were generated + + + + ); +}; + +export default DocsForm; diff --git a/src/core/workbench/app/src/features/admin/environments/components/EnvironmentKeyBox.tsx b/src/core/workbench/app/src/features/admin/environments/components/EnvironmentKeyBox.tsx new file mode 100644 index 00000000..0c4d2251 --- /dev/null +++ b/src/core/workbench/app/src/features/admin/environments/components/EnvironmentKeyBox.tsx @@ -0,0 +1,29 @@ +import { Box, Button, FormLabel, HStack, Textarea, VStack } from '@chakra-ui/react'; + +interface EnvironmentKeyBoxProps { + label: string; + value: string | undefined; + onCopy: () => void; +} + +export const EnvironmentKeyBox: React.FC = ({ label, value, onCopy }) => { + return ( + + + + + + + {label} +
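EnvironmentKeyBox deliberately stays simple: it only takes a label, the key text, and an onCopy callback, so the caller decides how the copy action is implemented. A plausible pairing with Chakra's useClipboard hook — the wrapping component, its publicKey prop, and the import path are assumptions for illustration, not part of this changeset:

import { useClipboard } from '@chakra-ui/react';

import { EnvironmentKeyBox } from './EnvironmentKeyBox';

// Hypothetical caller, shown only to illustrate the props contract.
export const PublicKeyPanel = ({ publicKey }: { publicKey?: string }) => {
  const { onCopy } = useClipboard(publicKey ?? ''); // copies the key text to the clipboard on demand

  return <EnvironmentKeyBox label="Public key" value={publicKey} onCopy={onCopy} />;
};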